From: Joel Rosdahl Date: Mon, 3 Aug 2020 05:18:19 +0000 (+0200) Subject: Convert cc_log calls to Logging::log X-Git-Tag: v4.0~232 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b8a69ab8654a515331402f3d2bfd5a7b458a5c25;p=thirdparty%2Fccache.git Convert cc_log calls to Logging::log --- diff --git a/src/Context.cpp b/src/Context.cpp index 015827685..2d9155965 100644 --- a/src/Context.cpp +++ b/src/Context.cpp @@ -29,6 +29,7 @@ #include #include +using Logging::log; using nonstd::string_view; Context::Context() @@ -106,7 +107,7 @@ void Context::unlink_pending_tmp_files_signal_safe() { for (const std::string& path : m_pending_tmp_files) { - // Don't call Util::unlink_tmp since its cc_log calls aren't signal safe. + // Don't call Util::unlink_tmp since its log calls aren't signal safe. unlink(path.c_str()); } // Don't clear m_pending_tmp_files since this method must be signal safe. @@ -131,7 +132,7 @@ Context::set_ignore_options(const std::vector& options) if (n_wildcards == 0 || (n_wildcards == 1 && option.back() == '*')) { m_ignore_options.push_back(option); } else { - cc_log("Skipping malformed ignore_options item: %s", option.c_str()); + log("Skipping malformed ignore_options item: {}", option); continue; } } diff --git a/src/Hash.cpp b/src/Hash.cpp index 9d6079c65..78d5e7c47 100644 --- a/src/Hash.cpp +++ b/src/Hash.cpp @@ -21,6 +21,7 @@ #include "Fd.hpp" #include "Logging.hpp" +using Logging::log; using nonstd::string_view; const string_view HASH_DELIMITER("\000cCaChE\000", 8); @@ -115,7 +116,7 @@ Hash::hash_file(const std::string& path) { Fd fd(open(path.c_str(), O_RDONLY | O_BINARY)); if (!fd) { - cc_log("Failed to open %s: %s", path.c_str(), strerror(errno)); + log("Failed to open {}: {}", path, strerror(errno)); return false; } diff --git a/src/InodeCache.cpp b/src/InodeCache.cpp index 6c4d7b8e0..d573afd04 100644 --- a/src/InodeCache.cpp +++ b/src/InodeCache.cpp @@ -32,6 +32,8 @@ #include #include +using Logging::log; + // The inode cache resides on a file that is mapped into shared memory by // running processes. It is implemented as a two level structure, where the top // level is a hash table consisting of buckets. Each bucket contains entries @@ -130,31 +132,29 @@ InodeCache::mmap_file(const std::string& inode_cache_file) } Fd fd(open(inode_cache_file.c_str(), O_RDWR)); if (!fd) { - cc_log("Failed to open inode cache %s: %s", - inode_cache_file.c_str(), - strerror(errno)); + log("Failed to open inode cache {}: {}", inode_cache_file, strerror(errno)); return false; } bool is_nfs; if (Util::is_nfs_fd(*fd, &is_nfs) == 0 && is_nfs) { - cc_log( - "Inode cache not supported because the cache file is located on nfs: %s", - inode_cache_file.c_str()); + log( + "Inode cache not supported because the cache file is located on nfs: {}", + inode_cache_file); return false; } SharedRegion* sr = reinterpret_cast(mmap( nullptr, sizeof(SharedRegion), PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0)); fd.close(); if (sr == reinterpret_cast(-1)) { - cc_log("Failed to mmap %s: %s", inode_cache_file.c_str(), strerror(errno)); + log("Failed to mmap {}: {}", inode_cache_file, strerror(errno)); return false; } // Drop the file from disk if the found version is not matching. This will // allow a new file to be generated. 
if (sr->version != k_version) { - cc_log( - "Dropping inode cache because found version %u does not match expected" - " version %u", + log( + "Dropping inode cache because found version {} does not match expected" + " version {}", sr->version, k_version); munmap(sr, sizeof(SharedRegion)); @@ -163,7 +163,7 @@ InodeCache::mmap_file(const std::string& inode_cache_file) } m_sr = sr; if (m_config.debug()) { - cc_log("inode cache file loaded: %s", inode_cache_file.c_str()); + log("inode cache file loaded: {}", inode_cache_file); } return true; } @@ -175,8 +175,7 @@ InodeCache::hash_inode(const std::string& path, { Stat stat = Stat::stat(path); if (!stat) { - cc_log( - "Could not stat %s: %s", path.c_str(), strerror(stat.error_number())); + log("Could not stat {}: {}", path, strerror(stat.error_number())); return false; } @@ -216,18 +215,18 @@ InodeCache::acquire_bucket(uint32_t index) } err = pthread_mutex_consistent(&bucket->mt); if (err) { - cc_log( - "Can't consolidate stale mutex at index %u: %s", index, strerror(err)); - cc_log("Consider removing the inode cache file if the problem persists"); + log( + "Can't consolidate stale mutex at index {}: {}", index, strerror(err)); + log("Consider removing the inode cache file if the problem persists"); return nullptr; } - cc_log("Wiping bucket at index %u because of stale mutex", index); + log("Wiping bucket at index {} because of stale mutex", index); memset(bucket->entries, 0, sizeof(Bucket::entries)); } else { #endif if (err) { - cc_log("Failed to lock mutex at index %u: %s", index, strerror(err)); - cc_log("Consider removing the inode cache file if problem persists"); + log("Failed to lock mutex at index {}: {}", index, strerror(err)); + log("Consider removing the inode cache file if problem persists"); ++m_sr->errors; return nullptr; } @@ -254,7 +253,7 @@ InodeCache::release_bucket(Bucket* bucket) bool InodeCache::create_new_file(const std::string& filename) { - cc_log("Creating a new inode cache"); + log("Creating a new inode cache"); // Create the new file to a temporary name to prevent other processes from // mapping it before it is fully initialized. @@ -264,15 +263,15 @@ InodeCache::create_new_file(const std::string& filename) bool is_nfs; if (Util::is_nfs_fd(*tmp_file.fd, &is_nfs) == 0 && is_nfs) { - cc_log( + log( "Inode cache not supported because the cache file would be located on" - " nfs: %s", - filename.c_str()); + " nfs: {}", + filename); return false; } int err = Util::fallocate(*tmp_file.fd, sizeof(SharedRegion)); if (err) { - cc_log("Failed to allocate file space for inode cache: %s", strerror(err)); + log("Failed to allocate file space for inode cache: {}", strerror(err)); return false; } SharedRegion* sr = @@ -283,7 +282,7 @@ InodeCache::create_new_file(const std::string& filename) *tmp_file.fd, 0)); if (sr == reinterpret_cast(-1)) { - cc_log("Failed to mmap new inode cache: %s", strerror(errno)); + log("Failed to mmap new inode cache: {}", strerror(errno)); return false; } @@ -308,7 +307,7 @@ InodeCache::create_new_file(const std::string& filename) // which will make us use the first created file even if we didn't win the // race. if (link(tmp_file.path.c_str(), filename.c_str()) != 0) { - cc_log("Failed to link new inode cache: %s", strerror(errno)); + log("Failed to link new inode cache: {}", strerror(errno)); return false; } @@ -398,7 +397,7 @@ InodeCache::get(const std::string& path, } release_bucket(bucket); - cc_log("inode cache %s: %s", found ? "hit" : "miss", path.c_str()); + log("inode cache {}: {}", found ? 
"hit" : "miss", path); if (m_config.debug()) { if (found) { @@ -406,11 +405,10 @@ InodeCache::get(const std::string& path, } else { ++m_sr->misses; } - cc_log( - "accumulated stats for inode cache: hits=%ld, misses=%ld, errors=%ld", - static_cast(m_sr->hits.load()), - static_cast(m_sr->misses.load()), - static_cast(m_sr->errors.load())); + log("accumulated stats for inode cache: hits={}, misses={}, errors={}", + m_sr->hits.load(), + m_sr->misses.load(), + m_sr->errors.load()); } return found; } @@ -446,7 +444,7 @@ InodeCache::put(const std::string& path, release_bucket(bucket); - cc_log("inode cache insert: %s", path.c_str()); + log("inode cache insert: {}", path); return true; } diff --git a/src/Lockfile.cpp b/src/Lockfile.cpp index e33a98d22..c1576800f 100644 --- a/src/Lockfile.cpp +++ b/src/Lockfile.cpp @@ -27,6 +27,8 @@ #include "third_party/fmt/core.h" +using Logging::log; + namespace { #ifndef _WIN32 @@ -49,9 +51,7 @@ do_acquire_posix(const std::string& lockfile, uint32_t staleness_limit) } int saved_errno = errno; - cc_log("lockfile_acquire: symlink %s: %s", - lockfile.c_str(), - strerror(saved_errno)); + log("lockfile_acquire: symlink {}: {}", lockfile, strerror(saved_errno)); if (saved_errno == ENOENT) { // Directory doesn't exist? if (Util::create_dir(Util::dir_name(lockfile))) { @@ -78,45 +78,41 @@ do_acquire_posix(const std::string& lockfile, uint32_t staleness_limit) // acquiring it. continue; } else { - cc_log("lockfile_acquire: readlink %s: %s", - lockfile.c_str(), - strerror(errno)); + log("lockfile_acquire: readlink {}: {}", lockfile, strerror(errno)); return false; } } if (content == my_content) { // Lost NFS reply? - cc_log("lockfile_acquire: symlink %s failed but we got the lock anyway", - lockfile.c_str()); + log("lockfile_acquire: symlink {} failed but we got the lock anyway", + lockfile); return true; } // A possible improvement here would be to check if the process holding the // lock is still alive and break the lock early if it isn't. - cc_log("lockfile_acquire: lock info for %s: %s", - lockfile.c_str(), - content.c_str()); + log("lockfile_acquire: lock info for {}: {}", lockfile, content); if (initial_content.empty()) { initial_content = content; } if (slept <= staleness_limit) { - cc_log("lockfile_acquire: failed to acquire %s; sleeping %u microseconds", - lockfile.c_str(), - to_sleep); + log("lockfile_acquire: failed to acquire {}; sleeping {} microseconds", + lockfile, + to_sleep); usleep(to_sleep); slept += to_sleep; to_sleep = std::min(max_to_sleep, 2 * to_sleep); } else if (content != initial_content) { - cc_log("lockfile_acquire: gave up acquiring %s", lockfile.c_str()); + log("lockfile_acquire: gave up acquiring {}", lockfile); return false; } else { // The lock seems to be stale -- break it and try again. - cc_log("lockfile_acquire: breaking %s", lockfile.c_str()); + log("lockfile_acquire: breaking {}", lockfile); if (!Util::unlink_tmp(lockfile)) { - cc_log("Failed to unlink %s: %s", lockfile.c_str(), strerror(errno)); + log("Failed to unlink {}: {}", lockfile, strerror(errno)); return false; } to_sleep = 1000; @@ -151,10 +147,10 @@ do_acquire_win32(const std::string& lockfile, uint32_t staleness_limit) } DWORD error = GetLastError(); - cc_log("lockfile_acquire: CreateFile %s: %s (%lu)", - lockfile.c_str(), - Win32Util::error_message(error).c_str(), - error); + log("lockfile_acquire: CreateFile {}: {} ({})", + lockfile, + Win32Util::error_message(error), + error); if (error == ERROR_PATH_NOT_FOUND) { // Directory doesn't exist? 
if (Util::create_dir(Util::dir_name(lockfile)) == 0) { @@ -171,13 +167,13 @@ do_acquire_win32(const std::string& lockfile, uint32_t staleness_limit) } if (slept > staleness_limit) { - cc_log("lockfile_acquire: gave up acquiring %s", lockfile.c_str()); + log("lockfile_acquire: gave up acquiring {}", lockfile); break; } - cc_log("lockfile_acquire: failed to acquire %s; sleeping %u microseconds", - lockfile.c_str(), - to_sleep); + log("lockfile_acquire: failed to acquire {}; sleeping {} microseconds", + lockfile, + to_sleep); usleep(to_sleep); slept += to_sleep; to_sleep = std::min(max_to_sleep, 2 * to_sleep); @@ -199,19 +195,19 @@ Lockfile::Lockfile(const std::string& path, uint32_t staleness_limit) m_handle = do_acquire_win32(m_lockfile, staleness_limit); #endif if (acquired()) { - cc_log("Acquired lock %s", m_lockfile.c_str()); + log("Acquired lock {}", m_lockfile); } else { - cc_log("Failed to acquire lock %s", m_lockfile.c_str()); + log("Failed to acquire lock {}", m_lockfile); } } Lockfile::~Lockfile() { if (acquired()) { - cc_log("Releasing lock %s", m_lockfile.c_str()); + log("Releasing lock {}", m_lockfile); #ifndef _WIN32 if (!Util::unlink_tmp(m_lockfile)) { - cc_log("Failed to unlink %s: %s", m_lockfile.c_str(), strerror(errno)); + log("Failed to unlink {}: {}", m_lockfile, strerror(errno)); } #else CloseHandle(m_handle); diff --git a/src/Logging.cpp b/src/Logging.cpp index db895ca7e..e51e5fc72 100644 --- a/src/Logging.cpp +++ b/src/Logging.cpp @@ -189,20 +189,3 @@ dump_log(const std::string& path) } } // namespace Logging - -void -cc_log(const char* format, ...) -{ - if (!Logging::enabled()) { - return; - } - - va_list ap; - va_start(ap, format); - - char buffer[16384]; - int size = vsnprintf(buffer, sizeof(buffer), format, ap); - Logging::log(string_view(buffer, size)); - - va_end(ap); -} diff --git a/src/Logging.hpp b/src/Logging.hpp index f074b0265..278812bd5 100644 --- a/src/Logging.hpp +++ b/src/Logging.hpp @@ -75,6 +75,3 @@ bulk_log(T&&... args) } } // namespace Logging - -// Legacy API. -void cc_log(const char* format, ...) ATTR_FORMAT(printf, 1, 2); diff --git a/src/Result.cpp b/src/Result.cpp index 02b8e30e5..161b2ae4d 100644 --- a/src/Result.cpp +++ b/src/Result.cpp @@ -87,6 +87,7 @@ // // 1: Introduced in ccache 4.0. +using Logging::log; using nonstd::nullopt; using nonstd::optional; @@ -179,7 +180,7 @@ Result::Reader::Reader(const std::string& result_path) optional Result::Reader::read(Consumer& consumer) { - cc_log("Reading result %s", m_result_path.c_str()); + log("Reading result {}", m_result_path); try { if (read_result(consumer)) { @@ -335,17 +336,17 @@ Writer::do_finalize() for (const auto& pair : m_entries_to_write) { const auto file_type = pair.first; const auto& path = pair.second; - cc_log("Storing result %s", path.c_str()); + log("Storing result {}", path); const bool store_raw = should_store_raw_file(m_ctx.config, file_type); uint64_t file_size = Stat::stat(path, Stat::OnError::throw_error).size(); - cc_log("Storing %s file #%u %s (%llu bytes) from %s", - store_raw ? "raw" : "embedded", - entry_number, - file_type_to_string(file_type), - (unsigned long long)file_size, - path.c_str()); + log("Storing {} file #{} {} ({} bytes) from {}", + store_raw ? "raw" : "embedded", + entry_number, + file_type_to_string(file_type), + file_size, + path); writer.write(store_raw ? 
k_raw_file_marker : k_embedded_file_marker); diff --git a/src/ResultExtractor.cpp b/src/ResultExtractor.cpp index 531f0b4c8..17891bd09 100644 --- a/src/ResultExtractor.cpp +++ b/src/ResultExtractor.cpp @@ -22,7 +22,7 @@ #include "third_party/nonstd/string_view.hpp" -using string_view = nonstd::string_view; +using nonstd::string_view; ResultExtractor::ResultExtractor(const std::string& directory) : m_directory(directory) diff --git a/src/ResultRetriever.cpp b/src/ResultRetriever.cpp index 37d30f5f2..db78b4790 100644 --- a/src/ResultRetriever.cpp +++ b/src/ResultRetriever.cpp @@ -23,8 +23,9 @@ #include "third_party/nonstd/string_view.hpp" +using Logging::log; +using nonstd::string_view; using Result::FileType; -using string_view = nonstd::string_view; ResultRetriever::ResultRetriever(Context& ctx, bool rewrite_dependency_target) : m_ctx(ctx), m_rewrite_dependency_target(rewrite_dependency_target) @@ -89,15 +90,15 @@ ResultRetriever::on_entry_start(uint32_t entry_number, } if (dest_path.empty()) { - cc_log("Not copying"); + log("Not copying"); } else if (dest_path == "/dev/null") { - cc_log("Not copying to /dev/null"); + log("Not copying to /dev/null"); } else { - cc_log("Retrieving %s file #%u %s (%llu bytes)", - raw_file ? "raw" : "embedded", - entry_number, - Result::file_type_to_string(file_type), - (unsigned long long)file_len); + log("Retrieving {} file #{} {} ({} bytes)", + raw_file ? "raw" : "embedded", + entry_number, + Result::file_type_to_string(file_type), + file_len); if (raw_file) { Util::clone_hard_link_or_copy_file(m_ctx, *raw_file, dest_path, false); @@ -106,7 +107,7 @@ ResultRetriever::on_entry_start(uint32_t entry_number, // if hard-linked, to make the object file newer than the source file). Util::update_mtime(*raw_file); } else { - cc_log("Copying to %s", dest_path.c_str()); + log("Copying to {}", dest_path); m_dest_fd = Fd( open(dest_path.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0666)); if (!m_dest_fd) { diff --git a/src/Stat.cpp b/src/Stat.cpp index ae6215447..3648a3166 100644 --- a/src/Stat.cpp +++ b/src/Stat.cpp @@ -22,6 +22,8 @@ #include "third_party/fmt/core.h" +using Logging::log; + Stat::Stat(StatFunction stat_function, const std::string& path, Stat::OnError on_error) @@ -35,7 +37,7 @@ Stat::Stat(StatFunction stat_function, throw Error(fmt::format("failed to stat {}: {}", path, strerror(errno))); } if (on_error == OnError::log) { - cc_log("Failed to stat %s: %s", path.c_str(), strerror(errno)); + log("Failed to stat {}: {}", path, strerror(errno)); } // The file is missing, so just zero fill the stat structure. 
This will diff --git a/src/Util.cpp b/src/Util.cpp index c7a6e6e11..e27328b72 100644 --- a/src/Util.cpp +++ b/src/Util.cpp @@ -68,6 +68,7 @@ # endif #endif +using Logging::log; using nonstd::nullopt; using nonstd::optional; using nonstd::string_view; @@ -235,28 +236,28 @@ clone_hard_link_or_copy_file(const Context& ctx, bool via_tmp_file) { if (ctx.config.file_clone()) { - cc_log("Cloning %s to %s", source.c_str(), dest.c_str()); + log("Cloning {} to {}", source, dest); try { clone_file(source, dest, via_tmp_file); return; } catch (Error& e) { - cc_log("Failed to clone: %s", e.what()); + log("Failed to clone: {}", e.what()); } } if (ctx.config.hard_link()) { unlink(dest.c_str()); - cc_log("Hard linking %s to %s", source.c_str(), dest.c_str()); + log("Hard linking {} to {}", source, dest); int ret = link(source.c_str(), dest.c_str()); if (ret == 0) { if (chmod(dest.c_str(), 0444) != 0) { - cc_log("Failed to chmod: %s", strerror(errno)); + log("Failed to chmod: {}", strerror(errno)); } return; } - cc_log("Failed to hard link: %s", strerror(errno)); + log("Failed to hard link: {}", strerror(errno)); } - cc_log("Copying %s to %s", source.c_str(), dest.c_str()); + log("Copying {} to {}", source, dest); copy_file(source, dest, via_tmp_file); } @@ -1044,7 +1045,7 @@ read_file(const std::string& path, size_t size_hint) } if (ret == -1) { - cc_log("Failed reading %s", path.c_str()); + log("Failed reading {}", path); throw Error(strerror(errno)); } @@ -1328,9 +1329,9 @@ unlink_safe(const std::string& path, UnlinkLog unlink_log) } } if (success || unlink_log == UnlinkLog::log_failure) { - cc_log("Unlink %s via %s", path.c_str(), tmp_name.c_str()); + log("Unlink {} via {}", path, tmp_name); if (!success) { - cc_log("Unlink failed: %s", strerror(saved_errno)); + log("Unlink failed: {}", strerror(saved_errno)); } } @@ -1346,9 +1347,9 @@ unlink_tmp(const std::string& path, UnlinkLog unlink_log) bool success = unlink(path.c_str()) == 0 || (errno == ENOENT || errno == ESTALE); if (success || unlink_log == UnlinkLog::log_failure) { - cc_log("Unlink %s", path.c_str()); + log("Unlink {}", path); if (!success) { - cc_log("Unlink failed: %s", strerror(saved_errno)); + log("Unlink failed: {}", strerror(saved_errno)); } } diff --git a/src/ZstdCompressor.cpp b/src/ZstdCompressor.cpp index 1524808ca..e1fa46d0e 100644 --- a/src/ZstdCompressor.cpp +++ b/src/ZstdCompressor.cpp @@ -21,6 +21,8 @@ #include "Logging.hpp" #include "exceptions.hpp" +using Logging::log; + const uint8_t k_default_zstd_compression_level = 1; ZstdCompressor::ZstdCompressor(FILE* stream, int8_t compression_level) @@ -28,25 +30,25 @@ ZstdCompressor::ZstdCompressor(FILE* stream, int8_t compression_level) { if (compression_level == 0) { compression_level = k_default_zstd_compression_level; - cc_log("Using default compression level %d", compression_level); + log("Using default compression level {}", compression_level); } // libzstd 1.3.4 and newer support negative levels. However, the query // function ZSTD_minCLevel did not appear until 1.3.6, so perform detection // based on version instead. 
if (ZSTD_versionNumber() < 10304 && compression_level < 1) { - cc_log( + log( "Using compression level 1 (minimum level supported by libzstd) instead" - " of %d", + " of {}", compression_level); compression_level = 1; } m_compression_level = std::min(compression_level, ZSTD_maxCLevel()); if (m_compression_level != compression_level) { - cc_log("Using compression level %d (max libzstd level) instead of %d", - m_compression_level, - compression_level); + log("Using compression level {} (max libzstd level) instead of {}", + m_compression_level, + compression_level); } size_t ret = ZSTD_initCStream(m_zstd_stream, m_compression_level); diff --git a/src/argprocessing.cpp b/src/argprocessing.cpp index b471128c5..bd3c30b00 100644 --- a/src/argprocessing.cpp +++ b/src/argprocessing.cpp @@ -26,6 +26,7 @@ #include +using Logging::log; using nonstd::nullopt; using nonstd::optional; using nonstd::string_view; @@ -103,14 +104,14 @@ detect_pch(Context& ctx, std::string pch_file; if (option == "-include-pch" || option == "-include-pth") { if (Stat::stat(arg)) { - cc_log("Detected use of precompiled header: %s", arg.c_str()); + log("Detected use of precompiled header: {}", arg); pch_file = arg; } } else if (!is_cc1_option) { for (const auto& extension : {".gch", ".pch", ".pth"}) { std::string path = arg + extension; if (Stat::stat(path)) { - cc_log("Detected use of precompiled header: %s", path.c_str()); + log("Detected use of precompiled header: {}", path); pch_file = path; } } @@ -118,9 +119,9 @@ detect_pch(Context& ctx, if (!pch_file.empty()) { if (!ctx.included_pch_file.empty()) { - cc_log("Multiple precompiled headers used: %s and %s", - ctx.included_pch_file.c_str(), - pch_file.c_str()); + log("Multiple precompiled headers used: {} and {}", + ctx.included_pch_file, + pch_file); return false; } ctx.included_pch_file = pch_file; @@ -175,13 +176,13 @@ process_profiling_option(Context& ctx, const std::string& arg) new_profile_use = true; new_profile_path = arg.substr(arg.find('=') + 1); } else { - cc_log("Unknown profiling option: %s", arg.c_str()); + log("Unknown profiling option: {}", arg); return false; } if (new_profile_use) { if (ctx.args_info.profile_use) { - cc_log("Multiple profiling options not supported"); + log("Multiple profiling options not supported"); return false; } ctx.args_info.profile_use = true; @@ -189,12 +190,12 @@ process_profiling_option(Context& ctx, const std::string& arg) if (!new_profile_path.empty()) { ctx.args_info.profile_path = new_profile_path; - cc_log("Set profile directory to %s", ctx.args_info.profile_path.c_str()); + log("Set profile directory to {}", ctx.args_info.profile_path); } if (ctx.args_info.profile_generate && ctx.args_info.profile_use) { // Too hard to figure out what the compiler will do. 
- cc_log("Both generating and using profile info, giving up"); + log("Both generating and using profile info, giving up"); return false; } @@ -226,7 +227,7 @@ process_arg(Context& ctx, if (args[i] == "--ccache-skip") { i++; if (i == args.size()) { - cc_log("--ccache-skip lacks an argument"); + log("--ccache-skip lacks an argument"); return STATS_ARGS; } state.common_args.push_back(args[i]); @@ -247,7 +248,7 @@ process_arg(Context& ctx, } auto file_args = Args::from_gcc_atfile(argpath); if (!file_args) { - cc_log("Couldn't read arg file %s", argpath); + log("Couldn't read arg file {}", argpath); return STATS_ARGS; } @@ -260,7 +261,7 @@ process_arg(Context& ctx, if (ctx.guessed_compiler == GuessedCompiler::nvcc && (args[i] == "-optf" || args[i] == "--options-file")) { if (i == args.size() - 1) { - cc_log("Expected argument after %s", args[i].c_str()); + log("Expected argument after {}", args[i]); return STATS_ARGS; } ++i; @@ -270,7 +271,7 @@ process_arg(Context& ctx, for (auto it = paths.rbegin(); it != paths.rend(); ++it) { auto file_args = Args::from_gcc_atfile(*it); if (!file_args) { - cc_log("Couldn't read CUDA options file %s", it->c_str()); + log("Couldn't read CUDA options file {}", *it); return STATS_ARGS; } @@ -283,19 +284,19 @@ process_arg(Context& ctx, // These are always too hard. if (compopt_too_hard(args[i]) || Util::starts_with(args[i], "-fdump-") || Util::starts_with(args[i], "-MJ")) { - cc_log("Compiler option %s is unsupported", args[i].c_str()); + log("Compiler option {} is unsupported", args[i]); return STATS_UNSUPPORTED_OPTION; } // These are too hard in direct mode. if (config.direct_mode() && compopt_too_hard_for_direct_mode(args[i])) { - cc_log("Unsupported compiler option for direct mode: %s", args[i].c_str()); + log("Unsupported compiler option for direct mode: {}", args[i]); config.set_direct_mode(false); } // -Xarch_* options are too hard. if (Util::starts_with(args[i], "-Xarch_")) { - cc_log("Unsupported compiler option: %s", args[i].c_str()); + log("Unsupported compiler option: {}", args[i]); return STATS_UNSUPPORTED_OPTION; } @@ -333,7 +334,7 @@ process_arg(Context& ctx, || (ctx.guessed_compiler == GuessedCompiler::nvcc && args[i] == "-Werror")) { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } state.compiler_only_args.push_back(args[i + 1]); @@ -356,11 +357,11 @@ process_arg(Context& ctx, // flag. if (args[i] == "-fmodules") { if (!config.depend_mode() || !config.direct_mode()) { - cc_log("Compiler option %s is unsupported without direct depend mode", - args[i].c_str()); + log("Compiler option {} is unsupported without direct depend mode", + args[i]); return STATS_CANTUSEMODULES; } else if (!(config.sloppiness() & SLOPPY_MODULES)) { - cc_log( + log( "You have to specify \"modules\" sloppiness when using" " -fmodules to get hits"); return STATS_CANTUSEMODULES; @@ -400,7 +401,7 @@ process_arg(Context& ctx, // input file and strip all -x options from the arguments. if (args[i] == "-x") { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } if (args_info.input_file.empty()) { @@ -419,7 +420,7 @@ process_arg(Context& ctx, // We need to work out where the output was meant to go. 
if (args[i] == "-o") { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } args_info.output_obj = Util::make_relative_path(ctx, args[i + 1]); @@ -494,7 +495,7 @@ process_arg(Context& ctx, if (separate_argument) { // -MF arg if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } dep_file = args[i + 1]; @@ -520,7 +521,7 @@ process_arg(Context& ctx, if (args[i].size() == 3) { // -MQ arg or -MT arg if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } state.dep_args.push_back(args[i]); @@ -589,7 +590,7 @@ process_arg(Context& ctx, // Alternate form of specifying sysroot without = if (args[i] == "--sysroot") { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } state.common_args.push_back(args[i]); @@ -602,7 +603,7 @@ process_arg(Context& ctx, // Alternate form of specifying target without = if (args[i] == "-target") { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } state.common_args.push_back(args[i]); @@ -617,7 +618,7 @@ process_arg(Context& ctx, // -P removes preprocessor information in such a way that the object file // from compiling the preprocessed file will not be equal to the object // file produced when compiling without ccache. - cc_log("Too hard option -Wp,-P detected"); + log("Too hard option -Wp,-P detected"); return STATS_UNSUPPORTED_OPTION; } else if (Util::starts_with(args[i], "-Wp,-MD,") && args[i].find(',', 8) == std::string::npos) { @@ -652,8 +653,7 @@ process_arg(Context& ctx, } else if (config.direct_mode()) { // -Wp, can be used to pass too hard options to the preprocessor. // Hence, disable direct mode. - cc_log("Unsupported compiler option for direct mode: %s", - args[i].c_str()); + log("Unsupported compiler option for direct mode: {}", args[i]); config.set_direct_mode(false); } @@ -675,7 +675,7 @@ process_arg(Context& ctx, if (args[i] == "--serialize-diagnostics") { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } args_info.generating_diagnostics = true; @@ -730,7 +730,7 @@ process_arg(Context& ctx, // among multiple users. i++; if (i <= args.size() - 1) { - cc_log("Skipping argument -index-store-path %s", args[i].c_str()); + log("Skipping argument -index-store-path {}", args[i]); } return nullopt; } @@ -740,7 +740,7 @@ process_arg(Context& ctx, // output produced by the compiler will be normalized. if (compopt_takes_path(args[i])) { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } @@ -792,7 +792,7 @@ process_arg(Context& ctx, // Options that take an argument. 
if (compopt_takes_arg(args[i])) { if (i == args.size() - 1) { - cc_log("Missing argument to %s", args[i].c_str()); + log("Missing argument to {}", args[i]); return STATS_ARGS; } @@ -826,8 +826,7 @@ process_arg(Context& ctx, if (args[i] != "/dev/null") { auto st = Stat::stat(args[i]); if (!st || !st.is_regular()) { - cc_log("%s is not a regular file, not considering as input file", - args[i].c_str()); + log("{} is not a regular file, not considering as input file", args[i]); state.common_args.push_back(args[i]); return nullopt; } @@ -835,19 +834,17 @@ process_arg(Context& ctx, if (!args_info.input_file.empty()) { if (!language_for_file(args[i]).empty()) { - cc_log("Multiple input files: %s and %s", - args_info.input_file.c_str(), - args[i].c_str()); + log("Multiple input files: {} and {}", args_info.input_file, args[i]); return STATS_MULTIPLE; } else if (!state.found_c_opt && !state.found_dc_opt) { - cc_log("Called for link with %s", args[i].c_str()); + log("Called for link with {}", args[i]); if (args[i].find("conftest.") != std::string::npos) { return STATS_CONFTEST; } else { return STATS_LINK; } } else { - cc_log("Unsupported source extension: %s", args[i].c_str()); + log("Unsupported source extension: {}", args[i]); return STATS_SOURCELANG; } } @@ -950,24 +947,24 @@ process_args(Context& ctx, } if (state.generating_debuginfo_level_3 && !config.run_second_cpp()) { - cc_log("Generating debug info level 3; not compiling preprocessed code"); + log("Generating debug info level 3; not compiling preprocessed code"); config.set_run_second_cpp(true); } handle_dependency_environment_variables(ctx, state); if (args_info.input_file.empty()) { - cc_log("No input file found"); + log("No input file found"); return STATS_NOINPUT; } if (state.found_pch || state.found_fpch_preprocess) { args_info.using_precompiled_header = true; if (!(config.sloppiness() & SLOPPY_TIME_MACROS)) { - cc_log( + log( "You have to specify \"time_macros\" sloppiness when using" " precompiled headers to get direct hits"); - cc_log("Disabling direct mode"); + log("Disabling direct mode"); return STATS_CANTUSEPCH; } } @@ -982,7 +979,7 @@ process_args(Context& ctx, state.file_language = language_for_file(args_info.input_file); if (!state.explicit_language.empty()) { if (!language_is_supported(state.explicit_language)) { - cc_log("Unsupported language: %s", state.explicit_language.c_str()); + log("Unsupported language: {}", state.explicit_language); return STATS_SOURCELANG; } args_info.actual_language = state.explicit_language; @@ -996,7 +993,7 @@ process_args(Context& ctx, if (args_info.output_is_precompiled_header && !(config.sloppiness() & SLOPPY_PCH_DEFINES)) { - cc_log( + log( "You have to specify \"pch_defines,time_macros\" sloppiness when" " creating precompiled headers"); return STATS_CANTUSEPCH; @@ -1006,7 +1003,7 @@ process_args(Context& ctx, if (args_info.output_is_precompiled_header) { state.common_args.push_back("-c"); } else { - cc_log("No -c option found"); + log("No -c option found"); // Having a separate statistic for autoconf tests is useful, as they are // the dominant form of "called for link" in many cases. 
return args_info.input_file.find("conftest.") != std::string::npos @@ -1016,12 +1013,12 @@ process_args(Context& ctx, } if (args_info.actual_language.empty()) { - cc_log("Unsupported source extension: %s", args_info.input_file.c_str()); + log("Unsupported source extension: {}", args_info.input_file); return STATS_SOURCELANG; } if (!config.run_second_cpp() && args_info.actual_language == "cu") { - cc_log("Using CUDA compiler; not compiling preprocessed code"); + log("Using CUDA compiler; not compiling preprocessed code"); config.set_run_second_cpp(true); } @@ -1029,7 +1026,7 @@ process_args(Context& ctx, if (args_info.output_is_precompiled_header && !config.run_second_cpp()) { // It doesn't work to create the .gch from preprocessed source. - cc_log("Creating precompiled header; not compiling preprocessed code"); + log("Creating precompiled header; not compiling preprocessed code"); config.set_run_second_cpp(true); } @@ -1040,7 +1037,7 @@ process_args(Context& ctx, // Don't try to second guess the compilers heuristics for stdout handling. if (args_info.output_obj == "-") { - cc_log("Output file is -"); + log("Output file is -"); return STATS_OUTSTDOUT; } @@ -1057,7 +1054,7 @@ process_args(Context& ctx, if (args_info.seen_split_dwarf) { size_t pos = args_info.output_obj.rfind('.'); if (pos == std::string::npos || pos == args_info.output_obj.size() - 1) { - cc_log("Badly formed object filename"); + log("Badly formed object filename"); return STATS_ARGS; } @@ -1068,7 +1065,7 @@ process_args(Context& ctx, if (args_info.output_obj != "/dev/null") { auto st = Stat::stat(args_info.output_obj); if (st && !st.is_regular()) { - cc_log("Not a regular file: %s", args_info.output_obj.c_str()); + log("Not a regular file: {}", args_info.output_obj); return STATS_BADOUTPUTFILE; } } @@ -1076,7 +1073,7 @@ process_args(Context& ctx, auto output_dir = std::string(Util::dir_name(args_info.output_obj)); auto st = Stat::stat(output_dir); if (!st || !st.is_directory()) { - cc_log("Directory does not exist: %s", output_dir.c_str()); + log("Directory does not exist: {}", output_dir); return STATS_BADOUTPUTFILE; } diff --git a/src/ccache.cpp b/src/ccache.cpp index 8ba561b8a..2bfefb731 100644 --- a/src/ccache.cpp +++ b/src/ccache.cpp @@ -160,7 +160,7 @@ add_prefix(const Context& ctx, Args& args, const std::string& prefix_command) prefix.push_back(path); } - cc_log("Using command-line prefix %s", prefix_command.c_str()); + log("Using command-line prefix {}", prefix_command); for (size_t i = prefix.size(); i != 0; i--) { args.push_front(prefix[i - 1]); } @@ -224,7 +224,7 @@ init_hash_debug(Context& ctx, hash.enable_debug(section_name, debug_binary_file.get(), debug_text_file); ctx.hash_debug_files.push_back(std::move(debug_binary_file)); } else { - cc_log("Failed to open %s: %s", path.c_str(), strerror(errno)); + log("Failed to open {}: {}", path, strerror(errno)); } } @@ -301,7 +301,7 @@ do_remember_include_file(Context& ctx, } if (!st.is_regular()) { // Device, pipe, socket or other strange creature. - cc_log("Non-regular include file %s", path.c_str()); + log("Non-regular include file {}", path); return false; } @@ -316,14 +316,14 @@ do_remember_include_file(Context& ctx, // under "Performance" in doc/MANUAL.adoc. if (!(ctx.config.sloppiness() & SLOPPY_INCLUDE_FILE_MTIME) && st.mtime() >= ctx.time_of_compilation) { - cc_log("Include file %s too new", path.c_str()); + log("Include file {} too new", path); return false; } // The same >= logic as above applies to the change time of the file. 
if (!(ctx.config.sloppiness() & SLOPPY_INCLUDE_FILE_CTIME) && st.ctime() >= ctx.time_of_compilation) { - cc_log("Include file %s ctime too new", path.c_str()); + log("Include file {} ctime too new", path); return false; } @@ -333,7 +333,7 @@ do_remember_include_file(Context& ctx, is_pch = Util::is_precompiled_header(path); if (is_pch) { if (ctx.included_pch_file.empty()) { - cc_log("Detected use of precompiled header: %s", path.c_str()); + log("Detected use of precompiled header: {}", path); } bool using_pch_sum = false; if (ctx.config.pch_external_checksum()) { @@ -343,7 +343,7 @@ do_remember_include_file(Context& ctx, if (Stat::stat(pch_sum_path, Stat::OnError::log)) { path = std::move(pch_sum_path); using_pch_sum = true; - cc_log("Using pch.sum file %s", path.c_str()); + log("Using pch.sum file {}", path); } } @@ -387,7 +387,7 @@ remember_include_file(Context& ctx, { if (!do_remember_include_file(ctx, path, cpp_hash, system, depend_mode_hash) && ctx.config.direct_mode()) { - cc_log("Disabling direct mode"); + log("Disabling direct mode"); ctx.config.set_direct_mode(false); } } @@ -497,7 +497,7 @@ process_preprocessed_file(Context& ctx, } q++; if (q >= end) { - cc_log("Failed to parse included file path"); + log("Failed to parse included file path"); return false; } // q points to the beginning of an include file path @@ -548,7 +548,7 @@ process_preprocessed_file(Context& ctx, // part of inline assembly, refers to an external file. If the file // changes, the hash should change as well, but finding out what file to // hash is too hard for ccache, so just bail out. - cc_log( + log( "Found unsupported .inc" "bin directive in source code"); failed(STATS_UNSUPPORTED_DIRECTIVE); @@ -594,12 +594,11 @@ static void use_relative_paths_in_depfile(const Context& ctx) { if (ctx.config.base_dir().empty()) { - cc_log("Base dir not set, skip using relative paths"); + log("Base dir not set, skip using relative paths"); return; // nothing to do } if (!ctx.has_absolute_include_headers) { - cc_log( - "No absolute path for included files found, skip using relative paths"); + log("No absolute path for included files found, skip using relative paths"); return; // nothing to do } @@ -608,7 +607,7 @@ use_relative_paths_in_depfile(const Context& ctx) try { file_content = Util::read_file(output_dep); } catch (const Error& e) { - cc_log("Cannot open dependency file %s: %s", output_dep.c_str(), e.what()); + log("Cannot open dependency file {}: {}", output_dep, e.what()); return; } @@ -629,9 +628,9 @@ use_relative_paths_in_depfile(const Context& ctx) } if (!rewritten) { - cc_log( - "No paths in dependency file %s made relative, skip relative path usage", - output_dep.c_str()); + log( + "No paths in dependency file {} made relative, skip relative path usage", + output_dep); return; } @@ -649,9 +648,8 @@ result_name_from_depfile(Context& ctx, Hash& hash) try { file_content = Util::read_file(ctx.args_info.output_dep); } catch (const Error& e) { - cc_log("Cannot open dependency file %s: %s", - ctx.args_info.output_dep.c_str(), - e.what()); + log( + "Cannot open dependency file {}: {}", ctx.args_info.output_dep, e.what()); return nullopt; } @@ -707,23 +705,19 @@ do_execute(Context& ctx, if (errors.find("unrecognized command line option") != std::string::npos && errors.find("-fdiagnostics-color") != std::string::npos) { // Old versions of GCC do not support colored diagnostics. 
- cc_log("-fdiagnostics-color is unsupported; trying again without it"); + log("-fdiagnostics-color is unsupported; trying again without it"); tmp_stdout.fd = Fd(open( tmp_stdout.path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0600)); if (!tmp_stdout.fd) { - cc_log("Failed to truncate %s: %s", - tmp_stdout.path.c_str(), - strerror(errno)); + log("Failed to truncate {}: {}", tmp_stdout.path, strerror(errno)); failed(STATS_ERROR); } tmp_stderr.fd = Fd(open( tmp_stderr.path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0600)); if (!tmp_stderr.fd) { - cc_log("Failed to truncate %s: %s", - tmp_stderr.path.c_str(), - strerror(errno)); + log("Failed to truncate {}: {}", tmp_stderr.path, strerror(errno)); failed(STATS_ERROR); } @@ -752,14 +746,14 @@ update_manifest_file(Context& ctx) || ctx.args_info.output_is_precompiled_header; MTR_BEGIN("manifest", "manifest_put"); - cc_log("Adding result name to %s", ctx.manifest_path().c_str()); + log("Adding result name to {}", ctx.manifest_path()); if (!manifest_put(ctx.config, ctx.manifest_path(), ctx.result_name(), ctx.included_files, ctx.time_of_compilation, save_timestamp)) { - cc_log("Failed to add result name to %s", ctx.manifest_path().c_str()); + log("Failed to add result name to {}", ctx.manifest_path()); } else { auto st = Stat::stat(ctx.manifest_path(), Stat::OnError::log); @@ -848,14 +842,12 @@ to_cache(Context& ctx, // non-existent .dwo files. if (unlink(ctx.args_info.output_dwo.c_str()) != 0 && errno != ENOENT && errno != ESTALE) { - cc_log("Failed to unlink %s: %s", - ctx.args_info.output_dwo.c_str(), - strerror(errno)); + log("Failed to unlink {}: {}", ctx.args_info.output_dwo, strerror(errno)); failed(STATS_BADOUTPUTFILE); } } - cc_log("Running real compiler"); + log("Running real compiler"); MTR_BEGIN("execute", "compiler"); TemporaryFile tmp_stdout( @@ -896,7 +888,7 @@ to_cache(Context& ctx, // distcc-pump outputs lines like this: // __________Using # distcc servers in pump mode if (st.size() != 0 && ctx.guessed_compiler != GuessedCompiler::pump) { - cc_log("Compiler produced stdout"); + log("Compiler produced stdout"); failed(STATS_STDOUT); } @@ -909,7 +901,7 @@ to_cache(Context& ctx, } if (status != 0) { - cc_log("Compiler gave exit status %d", status); + log("Compiler gave exit status {}", status); // We can output stderr immediately instead of rerunning the compiler. Util::send_to_stderr(Util::read_file(tmp_stderr_path), @@ -936,11 +928,11 @@ to_cache(Context& ctx, st = Stat::stat(ctx.args_info.output_obj); if (!st) { - cc_log("Compiler didn't produce an object file"); + log("Compiler didn't produce an object file"); failed(STATS_NOOUTPUT); } if (st.size() == 0) { - cc_log("Compiler produced an empty object file"); + log("Compiler produced an empty object file"); failed(STATS_EMPTYOUTPUT); } @@ -977,9 +969,9 @@ to_cache(Context& ctx, auto error = result_writer.finalize(); if (error) { - cc_log("Error: %s", error->c_str()); + log("Error: {}", *error); } else { - cc_log("Stored in cache: %s", ctx.result_path().c_str()); + log("Stored in cache: {}", ctx.result_path()); } auto new_dest_stat = Stat::stat(ctx.result_path(), Stat::OnError::log); @@ -998,9 +990,9 @@ to_cache(Context& ctx, // save the stat call if we exit early. std::string first_level_dir(Util::dir_name(ctx.stats_file())); if (!create_cachedir_tag(first_level_dir)) { - cc_log("Failed to create %s/CACHEDIR.TAG (%s)", - first_level_dir.c_str(), - strerror(errno)); + log("Failed to create {}/CACHEDIR.TAG ({})", + first_level_dir, + strerror(errno)); } // Everything OK. 
@@ -1044,7 +1036,7 @@ get_result_name_from_cpp(Context& ctx, Args& args, Hash& hash) } args.push_back(ctx.args_info.input_file); add_prefix(ctx, args, ctx.config.prefix_command_cpp()); - cc_log("Running preprocessor"); + log("Running preprocessor"); MTR_BEGIN("execute", "preprocessor"); status = do_execute(ctx, args, std::move(tmp_stdout), std::move(tmp_stderr)); @@ -1053,7 +1045,7 @@ get_result_name_from_cpp(Context& ctx, Args& args, Hash& hash) } if (status != 0) { - cc_log("Preprocessor gave exit status %d", status); + log("Preprocessor gave exit status {}", status); failed(STATS_PREPROCESSOR); } @@ -1067,7 +1059,7 @@ get_result_name_from_cpp(Context& ctx, Args& args, Hash& hash) if (!ctx.args_info.direct_i_file && !hash_binary_file(ctx, hash, stderr_path)) { // Somebody removed the temporary file? - cc_log("Failed to open %s: %s", stderr_path.c_str(), strerror(errno)); + log("Failed to open {}: {}", stderr_path, strerror(errno)); failed(STATS_ERROR); } @@ -1117,8 +1109,8 @@ hash_compiler(const Context& ctx, } else { // command string if (!hash_multicommand_output( hash, ctx.config.compiler_check(), ctx.orig_args[0])) { - cc_log("Failure running compiler check command: %s", - ctx.config.compiler_check().c_str()); + log("Failure running compiler check command: {}", + ctx.config.compiler_check()); failed(STATS_COMPCHECK); } } @@ -1236,16 +1228,16 @@ hash_common_info(const Context& ctx, if (sep_pos != std::string::npos) { std::string old_path = map.substr(0, sep_pos); std::string new_path = map.substr(sep_pos + 1); - cc_log("Relocating debuginfo from %s to %s (CWD: %s)", - old_path.c_str(), - new_path.c_str(), - ctx.apparent_cwd.c_str()); + log("Relocating debuginfo from {} to {} (CWD: {})", + old_path, + new_path, + ctx.apparent_cwd); if (Util::starts_with(ctx.apparent_cwd, old_path)) { dir_to_hash = new_path + ctx.apparent_cwd.substr(old_path.size()); } } } - cc_log("Hashing CWD %s", dir_to_hash.c_str()); + log("Hashing CWD {}", dir_to_hash); hash.hash_delimiter("cwd"); hash.hash(dir_to_hash); } @@ -1276,14 +1268,14 @@ hash_common_info(const Context& ctx, string_view stem = Util::remove_extension(Util::base_name(ctx.args_info.output_obj)); std::string gcda_path = fmt::format("{}/{}.gcda", dir, stem); - cc_log("Hashing coverage path %s", gcda_path.c_str()); + log("Hashing coverage path {}", gcda_path); hash.hash_delimiter("gcda"); hash.hash(gcda_path); } // Possibly hash the sanitize blacklist file path. 
for (const auto& sanitize_blacklist : args_info.sanitize_blacklists) { - cc_log("Hashing sanitize blacklist %s", sanitize_blacklist.c_str()); + log("Hashing sanitize blacklist {}", sanitize_blacklist); hash.hash("sanitizeblacklist"); if (!hash_binary_file(ctx, hash, sanitize_blacklist)) { failed(STATS_BADEXTRAFILE); @@ -1293,7 +1285,7 @@ hash_common_info(const Context& ctx, if (!ctx.config.extra_files_to_hash().empty()) { for (const std::string& path : Util::split_into_strings( ctx.config.extra_files_to_hash(), PATH_DELIM)) { - cc_log("Hashing extra file %s", path.c_str()); + log("Hashing extra file {}", path); hash.hash_delimiter("extrafile"); if (!hash_binary_file(ctx, hash, path)) { failed(STATS_BADEXTRAFILE); @@ -1334,10 +1326,10 @@ hash_profile_data_file(const Context& ctx, Hash& hash) bool found = false; for (const std::string& p : paths_to_try) { - cc_log("Checking for profile data file %s", p.c_str()); + log("Checking for profile data file {}", p); auto st = Stat::stat(p); if (st && !st.is_directory()) { - cc_log("Adding profile data %s to the hash", p.c_str()); + log("Adding profile data {} to the hash", p); hash.hash_delimiter("-fprofile-use"); if (hash_binary_file(ctx, hash, p)) { found = true; @@ -1390,10 +1382,10 @@ calculate_result_name(Context& ctx, for (size_t i = 1; i < args.size(); i++) { // Trust the user if they've said we should not hash a given option. if (option_should_be_ignored(args[i], ctx.ignore_options())) { - cc_log("Not hashing ignored option: %s", args[i].c_str()); + log("Not hashing ignored option: {}", args[i]); if (i + 1 < args.size() && compopt_takes_arg(args[i])) { i++; - cc_log("Not hashing argument of ignored option: %s", args[i].c_str()); + log("Not hashing argument of ignored option: {}", args[i]); } continue; } @@ -1559,14 +1551,13 @@ calculate_result_name(Context& ctx, if (ctx.args_info.profile_generate) { assert(!ctx.args_info.profile_path.empty()); - cc_log("Adding profile directory %s to our hash", - ctx.args_info.profile_path.c_str()); + log("Adding profile directory {} to our hash", ctx.args_info.profile_path); hash.hash_delimiter("-fprofile-dir"); hash.hash(ctx.args_info.profile_path); } if (ctx.args_info.profile_use && !hash_profile_data_file(ctx, hash)) { - cc_log("No profile data file found"); + log("No profile data file found"); failed(STATS_NOINPUT); } @@ -1613,33 +1604,33 @@ calculate_result_name(Context& ctx, failed(STATS_ERROR); } if (result & HASH_SOURCE_CODE_FOUND_TIME) { - cc_log("Disabling direct mode"); + log("Disabling direct mode"); ctx.config.set_direct_mode(false); return nullopt; } ctx.set_manifest_name(hash.digest()); - cc_log("Looking for result name in %s", ctx.manifest_path().c_str()); + log("Looking for result name in {}", ctx.manifest_path()); MTR_BEGIN("manifest", "manifest_get"); result_name = manifest_get(ctx, ctx.manifest_path()); MTR_END("manifest", "manifest_get"); if (result_name) { - cc_log("Got result name from manifest"); + log("Got result name from manifest"); } else { - cc_log("Did not find result name in manifest"); + log("Did not find result name in manifest"); } } else { if (ctx.args_info.arch_args.empty()) { result_name = get_result_name_from_cpp(ctx, preprocessor_args, hash); - cc_log("Got result name from preprocessor"); + log("Got result name from preprocessor"); } else { preprocessor_args.push_back("-arch"); for (size_t i = 0; i < ctx.args_info.arch_args.size(); ++i) { preprocessor_args.push_back(ctx.args_info.arch_args[i]); result_name = get_result_name_from_cpp(ctx, preprocessor_args, hash); - 
cc_log("Got result name from preprocessor with -arch %s", - ctx.args_info.arch_args[i].c_str()); + log("Got result name from preprocessor with -arch {}", + ctx.args_info.arch_args[i]); if (i != ctx.args_info.arch_args.size() - 1) { result_name = nullopt; } @@ -1674,7 +1665,7 @@ from_cache(Context& ctx, enum fromcache_call_mode mode) || ctx.guessed_compiler == GuessedCompiler::unknown) && ctx.args_info.output_is_precompiled_header && !ctx.args_info.fno_pch_timestamp && mode == FROMCACHE_CPP_MODE) { - cc_log("Not considering cached precompiled header in preprocessor mode"); + log("Not considering cached precompiled header in preprocessor mode"); return nullopt; } @@ -1687,14 +1678,14 @@ from_cache(Context& ctx, enum fromcache_call_mode mode) auto error = result_reader.read(result_retriever); if (error) { - cc_log("Failed to get result from cache: %s", error->c_str()); + log("Failed to get result from cache: {}", *error); return nullopt; } else { // Update modification timestamp to save file from LRU cleanup. Util::update_mtime(ctx.result_path()); } - cc_log("Succeeded getting cached result"); + log("Succeeded getting cached result"); MTR_END("cache", "from_cache"); @@ -1843,14 +1834,14 @@ initialize(Context& ctx, int argc, const char* const* argv) create_initial_config_file(ctx.config); } - cc_log("=== CCACHE %s STARTED =========================================", - CCACHE_VERSION); + log("=== CCACHE {} STARTED =========================================", + CCACHE_VERSION); if (getenv("CCACHE_INTERNAL_TRACE")) { #ifdef MTR_ENABLED ctx.mini_trace = std::make_unique(ctx.args_info); #else - cc_log("Error: tracing is not enabled!"); + log("Error: tracing is not enabled!"); #endif } } @@ -1863,7 +1854,7 @@ set_up_uncached_err() int uncached_fd = dup(STDERR_FILENO); // The file descriptor is intentionally leaked. if (uncached_fd == -1) { - cc_log("dup(2) failed: %s", strerror(errno)); + log("dup(2) failed: {}", strerror(errno)); failed(STATS_ERROR); } @@ -1927,7 +1918,7 @@ cache_compilation(int argc, const char* const* argv) ctx->orig_args.erase_with_prefix("--ccache-"); add_prefix(*ctx, ctx->orig_args, ctx->config.prefix_command()); - cc_log("Failed; falling back to running the real compiler"); + log("Failed; falling back to running the real compiler"); Args saved_orig_args(std::move(ctx->orig_args)); auto execv_argv = saved_orig_args.to_argv(); @@ -1943,8 +1934,7 @@ static enum stats do_cache_compilation(Context& ctx, const char* const* argv) { if (ctx.actual_cwd.empty()) { - cc_log("Unable to determine current working directory: %s", - strerror(errno)); + log("Unable to determine current working directory: {}", strerror(errno)); failed(STATS_ERROR); } @@ -1959,7 +1949,7 @@ do_cache_compilation(Context& ctx, const char* const* argv) } if (ctx.config.disable()) { - cc_log("ccache is disabled"); + log("ccache is disabled"); // STATS_CACHEMISS is a dummy to trigger stats_flush. 
failed(STATS_CACHEMISS); } @@ -1969,10 +1959,10 @@ do_cache_compilation(Context& ctx, const char* const* argv) MTR_END("main", "set_up_uncached_err"); log("Command line: {}", Util::format_argv_for_logging(argv)); - cc_log("Hostname: %s", Util::get_hostname()); - cc_log("Working directory: %s", ctx.actual_cwd.c_str()); + log("Hostname: {}", Util::get_hostname()); + log("Working directory: {}", ctx.actual_cwd); if (ctx.apparent_cwd != ctx.actual_cwd) { - cc_log("Apparent working directory: %s", ctx.apparent_cwd.c_str()); + log("Apparent working directory: {}", ctx.apparent_cwd); } ctx.config.set_limit_multiple( @@ -2003,28 +1993,28 @@ do_cache_compilation(Context& ctx, const char* const* argv) && (!ctx.args_info.generating_dependencies || ctx.args_info.output_dep == "/dev/null" || !ctx.config.run_second_cpp())) { - cc_log("Disabling depend mode"); + log("Disabling depend mode"); ctx.config.set_depend_mode(false); } - cc_log("Source file: %s", ctx.args_info.input_file.c_str()); + log("Source file: {}", ctx.args_info.input_file); if (ctx.args_info.generating_dependencies) { - cc_log("Dependency file: %s", ctx.args_info.output_dep.c_str()); + log("Dependency file: {}", ctx.args_info.output_dep); } if (ctx.args_info.generating_coverage) { - cc_log("Coverage file: %s", ctx.args_info.output_cov.c_str()); + log("Coverage file: {}", ctx.args_info.output_cov); } if (ctx.args_info.generating_stackusage) { - cc_log("Stack usage file: %s", ctx.args_info.output_su.c_str()); + log("Stack usage file: {}", ctx.args_info.output_su); } if (ctx.args_info.generating_diagnostics) { - cc_log("Diagnostics file: %s", ctx.args_info.output_dia.c_str()); + log("Diagnostics file: {}", ctx.args_info.output_dia); } if (!ctx.args_info.output_dwo.empty()) { - cc_log("Split dwarf file: %s", ctx.args_info.output_dwo.c_str()); + log("Split dwarf file: {}", ctx.args_info.output_dwo); } - cc_log("Object file: %s", ctx.args_info.output_obj.c_str()); + log("Object file: {}", ctx.args_info.output_obj); MTR_META_THREAD_NAME(ctx.args_info.output_obj.c_str()); if (ctx.config.debug()) { @@ -2034,7 +2024,7 @@ do_cache_compilation(Context& ctx, const char* const* argv) if (debug_text_file) { ctx.hash_debug_files.push_back(std::move(debug_text_file)); } else { - cc_log("Failed to open %s: %s", path.c_str(), strerror(errno)); + log("Failed to open {}: {}", path, strerror(errno)); } } @@ -2066,7 +2056,7 @@ do_cache_compilation(Context& ctx, const char* const* argv) optional result_name; optional result_name_from_manifest; if (ctx.config.direct_mode()) { - cc_log("Trying direct lookup"); + log("Trying direct lookup"); MTR_BEGIN("hash", "direct_hash"); Args dummy_args; result_name = @@ -2093,7 +2083,7 @@ do_cache_compilation(Context& ctx, const char* const* argv) } if (ctx.config.read_only_direct()) { - cc_log("Read-only direct mode; running real compiler"); + log("Read-only direct mode; running real compiler"); failed(STATS_CACHEMISS); } @@ -2131,9 +2121,9 @@ do_cache_compilation(Context& ctx, const char* const* argv) // The best thing here would probably be to remove the hash entry from // the manifest. For now, we use a simpler method: just remove the // manifest file. 
- cc_log("Hash from manifest doesn't match preprocessor output"); - cc_log("Likely reason: different CCACHE_BASEDIRs used"); - cc_log("Removing manifest as a safety measure"); + log("Hash from manifest doesn't match preprocessor output"); + log("Likely reason: different CCACHE_BASEDIRs used"); + log("Removing manifest as a safety measure"); Util::unlink_safe(ctx.manifest_path()); put_result_in_manifest = true; @@ -2150,7 +2140,7 @@ do_cache_compilation(Context& ctx, const char* const* argv) } if (ctx.config.read_only()) { - cc_log("Read-only mode; running real compiler"); + log("Read-only mode; running real compiler"); failed(STATS_CACHEMISS); } diff --git a/src/cleanup.cpp b/src/cleanup.cpp index 158cf7725..0401d39fa 100644 --- a/src/cleanup.cpp +++ b/src/cleanup.cpp @@ -33,6 +33,8 @@ #include #include +using Logging::log; + static void delete_file(const std::string& path, uint64_t size, @@ -41,7 +43,7 @@ delete_file(const std::string& path, { bool deleted = Util::unlink_safe(path, Util::UnlinkLog::ignore_failure); if (!deleted && errno != ENOENT && errno != ESTALE) { - cc_log("Failed to unlink %s (%s)", path.c_str(), strerror(errno)); + log("Failed to unlink {} ({})", path, strerror(errno)); } else if (cache_size && files_in_cache) { // The counters are intentionally subtracted even if there was no file to // delete since the final cache size calculation will be incorrect if they @@ -74,7 +76,7 @@ clean_up_dir(const std::string& subdir, time_t max_age, const Util::ProgressReceiver& progress_receiver) { - cc_log("Cleaning up cache directory %s", subdir.c_str()); + log("Cleaning up cache directory {}", subdir); std::vector> files; Util::get_level_1_files( @@ -112,9 +114,9 @@ clean_up_dir(const std::string& subdir, return f1->lstat().mtime() < f2->lstat().mtime(); }); - cc_log("Before cleanup: %.0f KiB, %.0f files", - static_cast(cache_size) / 1024, - static_cast(files_in_cache)); + log("Before cleanup: {:.0f} KiB, {:.0f} files", + static_cast(cache_size) / 1024, + static_cast(files_in_cache)); bool cleaned = false; for (size_t i = 0; i < files.size(); @@ -155,12 +157,12 @@ clean_up_dir(const std::string& subdir, cleaned = true; } - cc_log("After cleanup: %.0f KiB, %.0f files", - static_cast(cache_size) / 1024, - static_cast(files_in_cache)); + log("After cleanup: {:.0f} KiB, {:.0f} files", + static_cast(cache_size) / 1024, + static_cast(files_in_cache)); if (cleaned) { - cc_log("Cleaned up cache directory %s", subdir.c_str()); + log("Cleaned up cache directory {}", subdir); stats_add_cleanup(subdir, 1); } @@ -190,7 +192,7 @@ static void wipe_dir(const std::string& subdir, const Util::ProgressReceiver& progress_receiver) { - cc_log("Clearing out cache directory %s", subdir.c_str()); + log("Clearing out cache directory {}", subdir); std::vector> files; Util::get_level_1_files( @@ -202,7 +204,7 @@ wipe_dir(const std::string& subdir, } if (!files.empty()) { - cc_log("Cleared out cache directory %s", subdir.c_str()); + log("Cleared out cache directory {}", subdir); stats_add_cleanup(subdir, 1); } diff --git a/src/compress.cpp b/src/compress.cpp index fff80e183..a1e421910 100644 --- a/src/compress.cpp +++ b/src/compress.cpp @@ -35,6 +35,8 @@ #include #include +using Logging::log; + static File open_file(const std::string& path, const char* mode) { @@ -101,7 +103,7 @@ recompress_file(Context& ctx, return; } - cc_log("Recompressing %s to level %d", cache_file.path().c_str(), level); + log("Recompressing {} to level {}", cache_file.path(), level); AtomicFile atomic_new_file(cache_file.path(), 
AtomicFile::Mode::binary);
auto writer = create_writer(atomic_new_file.stream(),
*reader,
@@ -137,7 +139,7 @@ recompress_file(Context& ctx,
stats_flush_to_file(ctx.config, stats_file, counters);
}
- cc_log("Recompression of %s done", cache_file.path().c_str());
+ log("Recompression of {} done", cache_file.path());
}
void
diff --git a/src/execute.cpp b/src/execute.cpp
index 78759d3fa..77789ac2d 100644
--- a/src/execute.cpp
+++ b/src/execute.cpp
@@ -133,10 +133,10 @@ win32execute(const char* path,
}
if (ret == 0) {
DWORD error = GetLastError();
- cc_log("failed to execute %s: %s (%lu)",
- full_path.c_str(),
- Win32Util::error_message(error).c_str(),
- error);
+ log("failed to execute {}: {} ({})",
+ full_path,
+ Win32Util::error_message(error),
+ error);
return -1;
}
WaitForSingleObject(pi.hProcess, INFINITE);
@@ -213,7 +213,7 @@ find_executable(const Context& ctx,
path = getenv("PATH");
}
if (path.empty()) {
- cc_log("No PATH variable");
+ log("No PATH variable");
return {};
}
diff --git a/src/hashutil.cpp b/src/hashutil.cpp
index 26689ca94..0a64d024e 100644
--- a/src/hashutil.cpp
+++ b/src/hashutil.cpp
@@ -247,7 +247,7 @@ hash_source_code_string(const Context& ctx,
hash.hash(str);
if (result & HASH_SOURCE_CODE_FOUND_DATE) {
- cc_log("Found __DATE__ in %s", path.c_str());
+ log("Found __DATE__ in {}", path);
// Make sure that the hash sum changes if the (potential) expansion of
// __DATE__ changes.
@@ -266,10 +266,10 @@ hash_source_code_string(const Context& ctx,
// not very useful since the chance that we get a cache hit later the same
// second should be quite slim... So, just signal back to the caller that
// __TIME__ has been found so that the direct mode can be disabled.
- cc_log("Found __TIME__ in %s", path.c_str());
+ log("Found __TIME__ in {}", path);
}
if (result & HASH_SOURCE_CODE_FOUND_TIMESTAMP) {
- cc_log("Found __TIMESTAMP__ in %s", path.c_str());
+ log("Found __TIMESTAMP__ in {}", path);
// Make sure that the hash sum changes if the (potential) expansion of
// __TIMESTAMP__ changes.
@@ -450,7 +450,7 @@ hash_command_output(Hash& hash,
int fd = _open_osfhandle((intptr_t)pipe_out[0], O_BINARY);
bool ok = hash.hash_fd(fd);
if (!ok) {
- cc_log("Error hashing compiler check command output: %s", strerror(errno));
+ log("Error hashing compiler check command output: {}", strerror(errno));
}
WaitForSingleObject(pi.hProcess, INFINITE);
DWORD exitcode;
@@ -459,7 +459,7 @@ hash_command_output(Hash& hash,
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
if (exitcode != 0) {
- cc_log("Compiler check command returned %d", (int)exitcode);
+ log("Compiler check command returned {}", exitcode);
return false;
}
return ok;
@@ -487,18 +487,17 @@ hash_command_output(Hash& hash,
close(pipefd[1]);
bool ok = hash.hash_fd(pipefd[0]);
if (!ok) {
- cc_log("Error hashing compiler check command output: %s",
- strerror(errno));
+ log("Error hashing compiler check command output: {}", strerror(errno));
}
close(pipefd[0]);
int status;
if (waitpid(pid, &status, 0) != pid) {
- cc_log("waitpid failed");
+ log("waitpid failed");
return false;
}
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- cc_log("Compiler check command returned %d", WEXITSTATUS(status));
+ log("Compiler check command returned {}", WEXITSTATUS(status));
return false;
}
return ok;
diff --git a/src/manifest.cpp b/src/manifest.cpp
index afb0bea89..e0d401b9f 100644
--- a/src/manifest.cpp
+++ b/src/manifest.cpp
@@ -107,6 +107,7 @@
// 1: Introduced in ccache 3.0. (Files are always compressed with gzip.)
// 2: Introduced in ccache 4.0.
+using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
@@ -426,25 +427,24 @@ verify_result(const Context& ctx,
|| ctx.guessed_compiler == GuessedCompiler::unknown)
&& ctx.args_info.output_is_precompiled_header
&& !ctx.args_info.fno_pch_timestamp && fi.mtime != fs.mtime) {
- cc_log("Precompiled header includes %s, which has a new mtime",
- path.c_str());
+ log("Precompiled header includes {}, which has a new mtime", path);
return false;
}
if (ctx.config.sloppiness() & SLOPPY_FILE_STAT_MATCHES) {
if (!(ctx.config.sloppiness() & SLOPPY_FILE_STAT_MATCHES_CTIME)) {
if (fi.mtime == fs.mtime && fi.ctime == fs.ctime) {
- cc_log("mtime/ctime hit for %s", path.c_str());
+ log("mtime/ctime hit for {}", path);
continue;
} else {
- cc_log("mtime/ctime miss for %s", path.c_str());
+ log("mtime/ctime miss for {}", path);
}
} else {
if (fi.mtime == fs.mtime) {
- cc_log("mtime hit for %s", path.c_str());
+ log("mtime hit for {}", path);
continue;
} else {
- cc_log("mtime miss for %s", path.c_str());
+ log("mtime miss for {}", path);
}
}
}
@@ -454,7 +454,7 @@ verify_result(const Context& ctx,
Hash hash;
int ret = hash_source_code_file(ctx, hash, path, fs.size);
if (ret & HASH_SOURCE_CODE_ERROR) {
- cc_log("Failed hashing %s", path.c_str());
+ log("Failed hashing {}", path);
return false;
}
if (ret & HASH_SOURCE_CODE_FOUND_TIME) {
@@ -484,11 +484,11 @@ manifest_get(const Context& ctx, const std::string& path)
// Update modification timestamp to save files from LRU cleanup.
Util::update_mtime(path);
} else {
- cc_log("No such manifest file");
+ log("No such manifest file");
return nullopt;
}
} catch (const Error& e) {
- cc_log("Error: %s", e.what());
+ log("Error: {}", e.what());
return nullopt;
}
@@ -529,7 +529,7 @@ manifest_put(const Config& config,
mf = std::make_unique();
}
} catch (const Error& e) {
- cc_log("Error: %s", e.what());
+ log("Error: {}", e.what());
// Manifest file was corrupt, ignore.
mf = std::make_unique();
}
@@ -545,15 +545,15 @@ manifest_put(const Config& config,
// A good way of solving this would be to maintain the result entries in
// LRU order and discarding the old ones. An easy way is to throw away all
// entries when there are too many. Let's do that for now.
- cc_log("More than %u entries in manifest file; discarding",
- k_max_manifest_entries);
+ log("More than {} entries in manifest file; discarding",
+ k_max_manifest_entries);
mf = std::make_unique();
} else if (mf->file_infos.size() > k_max_manifest_file_info_entries) {
// Rarely, FileInfo entries can grow large in pathological cases where
// many included files change, but the main file does not. This also puts
// an upper bound on the number of FileInfo entries.
- cc_log("More than %u FileInfo entries in manifest file; discarding",
- k_max_manifest_file_info_entries);
+ log("More than {} FileInfo entries in manifest file; discarding",
+ k_max_manifest_file_info_entries);
mf = std::make_unique();
}
@@ -564,7 +564,7 @@ manifest_put(const Config& config,
write_manifest(config, path, *mf);
return true;
} catch (const Error& e) {
- cc_log("Error: %s", e.what());
+ log("Error: {}", e.what());
return false;
}
}
diff --git a/src/stats.cpp b/src/stats.cpp
index 01d470037..f4d431e61 100644
--- a/src/stats.cpp
+++ b/src/stats.cpp
@@ -38,6 +38,8 @@
#define FLAG_ALWAYS 2 // always show, even if zero
#define FLAG_NEVER 4 // never show
+using Logging::log;
+
// Returns a formatted version of a statistics value, or the empty string if the
// statistics line shouldn't be printed.
using format_fn = std::string (*)(uint64_t value);
@@ -234,7 +236,7 @@ stats_write(const std::string& path, const Counters& counters)
} catch (const Error& e) {
// Make failure to write a stats file a soft error since it's not important
// enough to fail whole the process.
- cc_log("Error: %s", e.what());
+ log("Error: {}", e.what());
}
}
@@ -315,14 +317,14 @@ stats_flush_to_file(const Config& config,
if (config.disable()) {
// Just log result, don't update statistics.
- cc_log("Result: disabled");
+ log("Result: disabled");
return;
}
if (!config.log_file().empty() || config.debug()) {
for (auto& info : stats_info) {
if (updates[info.stat] != 0 && !(info.flags & FLAG_NOZERO)) {
- cc_log("Result: %s", info.message);
+ log("Result: {}", info.message);
}
}
}
@@ -351,18 +353,18 @@ stats_flush_to_file(const Config& config,
if (config.max_files() != 0
&& counters[STATS_NUMFILES] > config.max_files() / 16) {
- cc_log("Need to clean up %s since it holds %u files (limit: %u files)",
- subdir.c_str(),
- counters[STATS_NUMFILES],
- config.max_files() / 16);
+ log("Need to clean up {} since it holds {} files (limit: {} files)",
+ subdir,
+ counters[STATS_NUMFILES],
+ config.max_files() / 16);
need_cleanup = true;
}
if (config.max_size() != 0
&& counters[STATS_TOTALSIZE] > config.max_size() / 1024 / 16) {
- cc_log("Need to clean up %s since it holds %u KiB (limit: %lu KiB)",
- subdir.c_str(),
- counters[STATS_TOTALSIZE],
- (unsigned long)config.max_size() / 1024 / 16);
+ log("Need to clean up {} since it holds {} KiB (limit: {} KiB)",
+ subdir,
+ counters[STATS_TOTALSIZE],
+ config.max_size() / 1024 / 16);
need_cleanup = true;
}