fail-fast: false
matrix:
config:
- - name: Linux GCC debug + in source + tracing
+ - name: Linux GCC debug + C++14 + in source + tracing
os: ubuntu-18.04
CC: gcc
CXX: g++
ENABLE_CACHE_CLEANUP_TESTS: 1
BUILDDIR: .
CCACHE_LOC: .
- CMAKE_PARAMS: -DCMAKE_BUILD_TYPE=Debug -DENABLE_TRACING=1
+ CMAKE_PARAMS: -DCMAKE_BUILD_TYPE=Debug -DENABLE_TRACING=1 -DCMAKE_CXX_STANDARD=14
apt_get: elfutils libzstd1-dev
- name: Linux GCC 32-bit
project(ccache LANGUAGES C CXX ASM)
set(CMAKE_PROJECT_DESCRIPTION "a fast C/C++ compiler cache")
-set(CMAKE_CXX_STANDARD 11)
+if(NOT "${CMAKE_CXX_STANDARD}")
+ set(CMAKE_CXX_STANDARD 11)
+endif()
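+# The default can be overridden when configuring, e.g.
+# `cmake -DCMAKE_CXX_STANDARD=14 ..` (as the CI matrix above does), which
+# enables fmt's compile-time format string checks in the LOG/BULK_LOG macros.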
set(CMAKE_CXX_STANDARD_REQUIRED YES)
set(CMAKE_CXX_EXTENSIONS NO)
-Wno-global-constructors
-Wno-implicit-fallthrough
-Wno-padded
+ -Wno-shadow # Warnings in fmtlib
-Wno-shorten-64-to-32
-Wno-sign-conversion
+ -Wno-signed-enum-bitfield # Warnings in fmtlib
-Wno-weak-vtables
-Wno-old-style-cast)
- key: readability-function-size.LineThreshold
value: 700
- key: readability-function-size.StatementThreshold
- value: 500
+ value: 999999
- key: readability-function-size.BranchThreshold
value: 170
- key: readability-function-size.ParameterThreshold
value: 6
- key: readability-function-size.NestingThreshold
- value: 6
+ value: 999999
- key: readability-function-size.VariableThreshold
value: 80
...
#include <string>
#include <vector>
-using Logging::log;
using nonstd::string_view;
Context::Context()
if (n_wildcards == 0 || (n_wildcards == 1 && option.back() == '*')) {
m_ignore_options.push_back(option);
} else {
- log("Skipping malformed ignore_options item: {}", option);
+ LOG("Skipping malformed ignore_options item: {}", option);
continue;
}
}
#include "Fd.hpp"
#include "Logging.hpp"
-using Logging::log;
using nonstd::string_view;
const string_view HASH_DELIMITER("\000cCaChE\000", 8);
{
Fd fd(open(path.c_str(), O_RDONLY | O_BINARY));
if (!fd) {
- log("Failed to open {}: {}", path, strerror(errno));
+ LOG("Failed to open {}: {}", path, strerror(errno));
return false;
}
#include <sys/mman.h>
#include <type_traits>
-using Logging::log;
-
// The inode cache resides on a file that is mapped into shared memory by
// running processes. It is implemented as a two level structure, where the top
// level is a hash table consisting of buckets. Each bucket contains entries
}
Fd fd(open(inode_cache_file.c_str(), O_RDWR));
if (!fd) {
- log("Failed to open inode cache {}: {}", inode_cache_file, strerror(errno));
+ LOG("Failed to open inode cache {}: {}", inode_cache_file, strerror(errno));
return false;
}
bool is_nfs;
if (Util::is_nfs_fd(*fd, &is_nfs) == 0 && is_nfs) {
- log(
+ LOG(
"Inode cache not supported because the cache file is located on nfs: {}",
inode_cache_file);
return false;
nullptr, sizeof(SharedRegion), PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0));
fd.close();
if (sr == reinterpret_cast<void*>(-1)) {
- log("Failed to mmap {}: {}", inode_cache_file, strerror(errno));
+ LOG("Failed to mmap {}: {}", inode_cache_file, strerror(errno));
return false;
}
// Drop the file from disk if the found version does not match. This will
// allow a new file to be generated.
if (sr->version != k_version) {
- log(
+ LOG(
"Dropping inode cache because found version {} does not match expected"
" version {}",
sr->version,
}
m_sr = sr;
if (m_config.debug()) {
- log("inode cache file loaded: {}", inode_cache_file);
+ LOG("inode cache file loaded: {}", inode_cache_file);
}
return true;
}
{
Stat stat = Stat::stat(path);
if (!stat) {
- log("Could not stat {}: {}", path, strerror(stat.error_number()));
+ LOG("Could not stat {}: {}", path, strerror(stat.error_number()));
return false;
}
}
err = pthread_mutex_consistent(&bucket->mt);
if (err) {
- log(
+ LOG(
"Can't consolidate stale mutex at index {}: {}", index, strerror(err));
- log("Consider removing the inode cache file if the problem persists");
+ LOG_RAW("Consider removing the inode cache file if the problem persists");
return nullptr;
}
- log("Wiping bucket at index {} because of stale mutex", index);
+ LOG("Wiping bucket at index {} because of stale mutex", index);
memset(bucket->entries, 0, sizeof(Bucket::entries));
} else {
#endif
if (err) {
- log("Failed to lock mutex at index {}: {}", index, strerror(err));
- log("Consider removing the inode cache file if problem persists");
+ LOG("Failed to lock mutex at index {}: {}", index, strerror(err));
+ LOG_RAW("Consider removing the inode cache file if problem persists");
++m_sr->errors;
return nullptr;
}
bool
InodeCache::create_new_file(const std::string& filename)
{
- log("Creating a new inode cache");
+ LOG_RAW("Creating a new inode cache");
// Create the new file with a temporary name to prevent other processes from
// mapping it before it is fully initialized.
bool is_nfs;
if (Util::is_nfs_fd(*tmp_file.fd, &is_nfs) == 0 && is_nfs) {
- log(
+ LOG(
"Inode cache not supported because the cache file would be located on"
" nfs: {}",
filename);
}
int err = Util::fallocate(*tmp_file.fd, sizeof(SharedRegion));
if (err) {
- log("Failed to allocate file space for inode cache: {}", strerror(err));
+ LOG("Failed to allocate file space for inode cache: {}", strerror(err));
return false;
}
SharedRegion* sr =
*tmp_file.fd,
0));
if (sr == reinterpret_cast<void*>(-1)) {
- log("Failed to mmap new inode cache: {}", strerror(errno));
+ LOG("Failed to mmap new inode cache: {}", strerror(errno));
return false;
}
// which will make us use the first created file even if we didn't win the
// race.
if (link(tmp_file.path.c_str(), filename.c_str()) != 0) {
- log("Failed to link new inode cache: {}", strerror(errno));
+ LOG("Failed to link new inode cache: {}", strerror(errno));
return false;
}
}
release_bucket(bucket);
- log("inode cache {}: {}", found ? "hit" : "miss", path);
+ LOG("inode cache {}: {}", found ? "hit" : "miss", path);
if (m_config.debug()) {
if (found) {
} else {
++m_sr->misses;
}
- log("accumulated stats for inode cache: hits={}, misses={}, errors={}",
+ LOG("accumulated stats for inode cache: hits={}, misses={}, errors={}",
m_sr->hits.load(),
m_sr->misses.load(),
m_sr->errors.load());
release_bucket(bucket);
- log("inode cache insert: {}", path);
+ LOG("inode cache insert: {}", path);
return true;
}
#include <sstream>
#include <thread>
-using Logging::log;
-
namespace {
#ifndef _WIN32
}
int saved_errno = errno;
- log("lockfile_acquire: symlink {}: {}", lockfile, strerror(saved_errno));
+ LOG("lockfile_acquire: symlink {}: {}", lockfile, strerror(saved_errno));
if (saved_errno == ENOENT) {
// Directory doesn't exist?
if (Util::create_dir(Util::dir_name(lockfile))) {
// acquiring it.
continue;
} else {
- log("lockfile_acquire: readlink {}: {}", lockfile, strerror(errno));
+ LOG("lockfile_acquire: readlink {}: {}", lockfile, strerror(errno));
return false;
}
}
if (content == my_content) {
// Lost NFS reply?
- log("lockfile_acquire: symlink {} failed but we got the lock anyway",
+ LOG("lockfile_acquire: symlink {} failed but we got the lock anyway",
lockfile);
return true;
}
// A possible improvement here would be to check if the process holding the
// lock is still alive and break the lock early if it isn't.
- log("lockfile_acquire: lock info for {}: {}", lockfile, content);
+ LOG("lockfile_acquire: lock info for {}: {}", lockfile, content);
if (initial_content.empty()) {
initial_content = content;
}
if (slept <= staleness_limit) {
- log("lockfile_acquire: failed to acquire {}; sleeping {} microseconds",
+ LOG("lockfile_acquire: failed to acquire {}; sleeping {} microseconds",
lockfile,
to_sleep);
usleep(to_sleep);
slept += to_sleep;
to_sleep = std::min(max_to_sleep, 2 * to_sleep);
} else if (content != initial_content) {
- log("lockfile_acquire: gave up acquiring {}", lockfile);
+ LOG("lockfile_acquire: gave up acquiring {}", lockfile);
return false;
} else {
// The lock seems to be stale -- break it and try again.
- log("lockfile_acquire: breaking {}", lockfile);
+ LOG("lockfile_acquire: breaking {}", lockfile);
if (!Util::unlink_tmp(lockfile)) {
- log("Failed to unlink {}: {}", lockfile, strerror(errno));
+ LOG("Failed to unlink {}: {}", lockfile, strerror(errno));
return false;
}
to_sleep = 1000;
}
DWORD error = GetLastError();
- log("lockfile_acquire: CreateFile {}: {} ({})",
+ LOG("lockfile_acquire: CreateFile {}: {} ({})",
lockfile,
Win32Util::error_message(error),
error);
}
if (slept > staleness_limit) {
- log("lockfile_acquire: gave up acquiring {}", lockfile);
+ LOG("lockfile_acquire: gave up acquiring {}", lockfile);
break;
}
- log("lockfile_acquire: failed to acquire {}; sleeping {} microseconds",
+ LOG("lockfile_acquire: failed to acquire {}; sleeping {} microseconds",
lockfile,
to_sleep);
usleep(to_sleep);
m_handle = do_acquire_win32(m_lockfile, staleness_limit);
#endif
if (acquired()) {
- log("Acquired lock {}", m_lockfile);
+ LOG("Acquired lock {}", m_lockfile);
} else {
- log("Failed to acquire lock {}", m_lockfile);
+ LOG("Failed to acquire lock {}", m_lockfile);
}
}
Lockfile::~Lockfile()
{
if (acquired()) {
- log("Releasing lock {}", m_lockfile);
+ LOG("Releasing lock {}", m_lockfile);
#ifndef _WIN32
if (!Util::unlink_tmp(m_lockfile)) {
- log("Failed to unlink {}: {}", m_lockfile, strerror(errno));
+ LOG("Failed to unlink {}: {}", m_lockfile, strerror(errno));
}
#else
CloseHandle(m_handle);
if (file) {
(void)fwrite(debug_log_buffer.data(), debug_log_buffer.length(), 1, *file);
} else {
- log("Failed to open {}: {}", path, strerror(errno));
+ LOG("Failed to open {}: {}", path, strerror(errno));
}
}
#include "FormatNonstdStringView.hpp"
#include "third_party/fmt/core.h"
+#include "third_party/fmt/format.h"
#include "third_party/nonstd/optional.hpp"
#include "third_party/nonstd/string_view.hpp"
#include <string>
#include <utility>
+// Log a raw message (plus a newline character).
+#define LOG_RAW(message_) \
+ do { \
+ if (Logging::enabled()) { \
+ Logging::log(nonstd::string_view(message_)); \
+ } \
+ } while (false)
+
+// Log a message (plus a newline character) described by a format string with
+// at least one placeholder. `format_` is compile-time checked if
+// CMAKE_CXX_STANDARD >= 14.
+#define LOG(format_, ...) LOG_RAW(fmt::format(FMT_STRING(format_), __VA_ARGS__))
+
+// Log a message (plus a newline character) described by a format string with
+// at least one placeholder, without flushing and with a reused timestamp.
+// `format_` is compile-time checked if CMAKE_CXX_STANDARD >= 14.
+#define BULK_LOG(format_, ...) \
+ do { \
+ if (Logging::enabled()) { \
+ Logging::bulk_log(fmt::format(FMT_STRING(format_), __VA_ARGS__)); \
+ } \
+ } while (false)
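+// Example usage (illustrative, mirroring call sites elsewhere in this change):
+//
+//   LOG("Failed to open {}: {}", path, strerror(errno));
+//   LOG_RAW("Disabling direct mode");
+//   BULK_LOG("Config: ({}) {} = {}", origin, key, value);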
+
class Config;
namespace Logging {
// Write the current log memory buffer to `path`.
void dump_log(const std::string& path);
-// Log a message (plus a newline character). `args` are forwarded to
-// `fmt::format`.
-template<typename... T>
-inline void
-log(T&&... args)
-{
- if (!enabled()) {
- return;
- }
- log(nonstd::string_view(fmt::format(std::forward<T>(args)...)));
-}
-
-// Log a message (plus a newline character) without flushing and with a reused
-// timestamp. `args` are forwarded to `fmt::format`.
-template<typename... T>
-inline void
-bulk_log(T&&... args)
-{
- if (!enabled()) {
- return;
- }
- bulk_log(nonstd::string_view(fmt::format(std::forward<T>(args)...)));
-}
-
} // namespace Logging
// 1: Introduced in ccache 3.0. (Files are always compressed with gzip.)
// 2: Introduced in ccache 4.0.
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
|| ctx.guessed_compiler == GuessedCompiler::unknown)
&& ctx.args_info.output_is_precompiled_header
&& !ctx.args_info.fno_pch_timestamp && fi.mtime != fs.mtime) {
- log("Precompiled header includes {}, which has a new mtime", path);
+ LOG("Precompiled header includes {}, which has a new mtime", path);
return false;
}
if (ctx.config.sloppiness() & SLOPPY_FILE_STAT_MATCHES) {
if (!(ctx.config.sloppiness() & SLOPPY_FILE_STAT_MATCHES_CTIME)) {
if (fi.mtime == fs.mtime && fi.ctime == fs.ctime) {
- log("mtime/ctime hit for {}", path);
+ LOG("mtime/ctime hit for {}", path);
continue;
} else {
- log("mtime/ctime miss for {}", path);
+ LOG("mtime/ctime miss for {}", path);
}
} else {
if (fi.mtime == fs.mtime) {
- log("mtime hit for {}", path);
+ LOG("mtime hit for {}", path);
continue;
} else {
- log("mtime miss for {}", path);
+ LOG("mtime miss for {}", path);
}
}
}
Hash hash;
int ret = hash_source_code_file(ctx, hash, path, fs.size);
if (ret & HASH_SOURCE_CODE_ERROR) {
- log("Failed hashing {}", path);
+ LOG("Failed hashing {}", path);
return false;
}
if (ret & HASH_SOURCE_CODE_FOUND_TIME) {
// Update modification timestamp to save files from LRU cleanup.
Util::update_mtime(path);
} else {
- log("No such manifest file");
+ LOG_RAW("No such manifest file");
return nullopt;
}
} catch (const Error& e) {
- log("Error: {}", e.what());
+ LOG("Error: {}", e.what());
return nullopt;
}
mf = std::make_unique<ManifestData>();
}
} catch (const Error& e) {
- log("Error: {}", e.what());
+ LOG("Error: {}", e.what());
// Manifest file was corrupt, ignore.
mf = std::make_unique<ManifestData>();
}
// A good way of solving this would be to maintain the result entries in
// LRU order and discard the old ones. An easy way is to throw away all
// entries when there are too many. Let's do that for now.
- log("More than {} entries in manifest file; discarding",
+ LOG("More than {} entries in manifest file; discarding",
k_max_manifest_entries);
mf = std::make_unique<ManifestData>();
} else if (mf->file_infos.size() > k_max_manifest_file_info_entries) {
// Rarely, FileInfo entries can grow large in pathological cases where
// many included files change, but the main file does not. This also puts
// an upper bound on the number of FileInfo entries.
- log("More than {} FileInfo entries in manifest file; discarding",
+ LOG("More than {} FileInfo entries in manifest file; discarding",
k_max_manifest_file_info_entries);
mf = std::make_unique<ManifestData>();
}
write_manifest(config, path, *mf);
return true;
} catch (const Error& e) {
- log("Error: {}", e.what());
+ LOG("Error: {}", e.what());
return false;
}
}
//
// 1: Introduced in ccache 4.0.
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
using nonstd::string_view;
optional<std::string>
Result::Reader::read(Consumer& consumer)
{
- log("Reading result {}", m_result_path);
+ LOG("Reading result {}", m_result_path);
try {
if (read_result(consumer)) {
for (const auto& pair : m_entries_to_write) {
const auto file_type = pair.first;
const auto& path = pair.second;
- log("Storing result {}", path);
+ LOG("Storing result {}", path);
const bool store_raw = should_store_raw_file(m_ctx.config, file_type);
uint64_t file_size = Stat::stat(path, Stat::OnError::throw_error).size();
- log("Storing {} file #{} {} ({} bytes) from {}",
+ LOG("Storing {} file #{} {} ({} bytes) from {}",
store_raw ? "raw" : "embedded",
entry_number,
file_type_to_string(file_type),
#include "Context.hpp"
#include "Logging.hpp"
-using Logging::log;
using Result::FileType;
ResultRetriever::ResultRetriever(Context& ctx, bool rewrite_dependency_target)
}
if (dest_path.empty()) {
- log("Not copying");
+ LOG_RAW("Not copying");
} else if (dest_path == "/dev/null") {
- log("Not copying to /dev/null");
+ LOG_RAW("Not copying to /dev/null");
} else {
- log("Retrieving {} file #{} {} ({} bytes)",
+ LOG("Retrieving {} file #{} {} ({} bytes)",
raw_file ? "raw" : "embedded",
entry_number,
Result::file_type_to_string(file_type),
// if hard-linked, to make the object file newer than the source file).
Util::update_mtime(*raw_file);
} else {
- log("Copying to {}", dest_path);
+ LOG("Copying to {}", dest_path);
m_dest_fd = Fd(
open(dest_path.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0666));
if (!m_dest_fd) {
#include "Logging.hpp"
-using Logging::log;
-
Stat::Stat(StatFunction stat_function,
const std::string& path,
Stat::OnError on_error)
throw Error("failed to stat {}: {}", path, strerror(errno));
}
if (on_error == OnError::log) {
- log("Failed to stat {}: {}", path, strerror(errno));
+ LOG("Failed to stat {}: {}", path, strerror(errno));
}
// The file is missing, so just zero fill the stat structure. This will
#include "Util.hpp"
#include "exceptions.hpp"
-using Logging::log;
-using nonstd::nullopt;
-using nonstd::optional;
-
const unsigned FLAG_NOZERO = 1; // don't zero with the -z option
const unsigned FLAG_ALWAYS = 2; // always show, even if zero
const unsigned FLAG_NEVER = 4; // never show
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
{
Lockfile lock(path);
if (!lock.acquired()) {
- log("failed to acquire lock for {}", path);
+ LOG("Failed to acquire lock for {}", path);
return nullopt;
}
// Make failure to write a stats file a soft error since it's not
// important enough to fail the whole process and also because it is
// called in the Context destructor.
- log("Error: {}", e.what());
+ LOG("Error: {}", e.what());
}
return counters;
# endif
#endif
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
using nonstd::string_view;
{
if (ctx.config.file_clone()) {
#ifdef FILE_CLONING_SUPPORTED
- log("Cloning {} to {}", source, dest);
+ LOG("Cloning {} to {}", source, dest);
try {
clone_file(source, dest, via_tmp_file);
return;
} catch (Error& e) {
- log("Failed to clone: {}", e.what());
+ LOG("Failed to clone: {}", e.what());
}
#else
- log("Not cloning {} to {} since it's unsupported", source, dest);
+ LOG("Not cloning {} to {} since it's unsupported", source, dest);
#endif
}
if (ctx.config.hard_link()) {
unlink(dest.c_str());
- log("Hard linking {} to {}", source, dest);
+ LOG("Hard linking {} to {}", source, dest);
int ret = link(source.c_str(), dest.c_str());
if (ret == 0) {
if (chmod(dest.c_str(), 0444) != 0) {
- log("Failed to chmod: {}", strerror(errno));
+ LOG("Failed to chmod: {}", strerror(errno));
}
return;
}
- log("Failed to hard link: {}", strerror(errno));
+ LOG("Failed to hard link: {}", strerror(errno));
}
- log("Copying {} to {}", source, dest);
+ LOG("Copying {} to {}", source, dest);
copy_file(source, dest, via_tmp_file);
}
}
if (ret == -1) {
- log("Failed reading {}", path);
+ LOG("Failed reading {}", path);
throw Error(strerror(errno));
}
}
}
if (success || unlink_log == UnlinkLog::log_failure) {
- log("Unlink {} via {}", path, tmp_name);
+ LOG("Unlink {} via {}", path, tmp_name);
if (!success) {
- log("Unlink failed: {}", strerror(saved_errno));
+ LOG("Unlink failed: {}", strerror(saved_errno));
}
}
unlink(path.c_str()) == 0 || (errno == ENOENT || errno == ESTALE);
saved_errno = errno;
if (success || unlink_log == UnlinkLog::log_failure) {
- log("Unlink {}", path);
+ LOG("Unlink {}", path);
if (!success) {
- log("Unlink failed: {}", strerror(saved_errno));
+ LOG("Unlink failed: {}", strerror(saved_errno));
}
}
#include "CacheFile.hpp"
+#include "third_party/fmt/core.h"
+#include "third_party/fmt/format.h"
#include "third_party/nonstd/optional.hpp"
#include "third_party/nonstd/string_view.hpp"
#include <algorithm>
-using Logging::log;
-
ZstdCompressor::ZstdCompressor(FILE* stream, int8_t compression_level)
: m_stream(stream), m_zstd_stream(ZSTD_createCStream())
{
if (compression_level == 0) {
compression_level = default_compression_level;
- log("Using default compression level {}", compression_level);
+ LOG("Using default compression level {}", compression_level);
}
// libzstd 1.3.4 and newer support negative levels. However, the query
// function ZSTD_minCLevel did not appear until 1.3.6, so perform detection
// based on version instead.
if (ZSTD_versionNumber() < 10304 && compression_level < 1) {
- log(
+ LOG(
"Using compression level 1 (minimum level supported by libzstd) instead"
" of {}",
compression_level);
m_compression_level = std::min<int>(compression_level, ZSTD_maxCLevel());
if (m_compression_level != compression_level) {
- log("Using compression level {} (max libzstd level) instead of {}",
+ LOG("Using compression level {} (max libzstd level) instead of {}",
m_compression_level,
compression_level);
}
#include <cassert>
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
using nonstd::string_view;
std::string pch_file;
if (option == "-include-pch" || option == "-include-pth") {
if (Stat::stat(arg)) {
- log("Detected use of precompiled header: {}", arg);
+ LOG("Detected use of precompiled header: {}", arg);
pch_file = arg;
}
} else if (!is_cc1_option) {
for (const auto& extension : {".gch", ".pch", ".pth"}) {
std::string path = arg + extension;
if (Stat::stat(path)) {
- log("Detected use of precompiled header: {}", path);
+ LOG("Detected use of precompiled header: {}", path);
pch_file = path;
}
}
if (!pch_file.empty()) {
if (!ctx.included_pch_file.empty()) {
- log("Multiple precompiled headers used: {} and {}",
+ LOG("Multiple precompiled headers used: {} and {}",
ctx.included_pch_file,
pch_file);
return false;
new_profile_use = true;
new_profile_path = arg.substr(arg.find('=') + 1);
} else {
- log("Unknown profiling option: {}", arg);
+ LOG("Unknown profiling option: {}", arg);
return false;
}
if (new_profile_use) {
if (ctx.args_info.profile_use) {
- log("Multiple profiling options not supported");
+ LOG_RAW("Multiple profiling options not supported");
return false;
}
ctx.args_info.profile_use = true;
if (!new_profile_path.empty()) {
ctx.args_info.profile_path = new_profile_path;
- log("Set profile directory to {}", ctx.args_info.profile_path);
+ LOG("Set profile directory to {}", ctx.args_info.profile_path);
}
if (ctx.args_info.profile_generate && ctx.args_info.profile_use) {
// Too hard to figure out what the compiler will do.
- log("Both generating and using profile info, giving up");
+ LOG_RAW("Both generating and using profile info, giving up");
return false;
}
if (args[i] == "--ccache-skip") {
i++;
if (i == args.size()) {
- log("--ccache-skip lacks an argument");
+ LOG_RAW("--ccache-skip lacks an argument");
return Statistic::bad_compiler_arguments;
}
state.common_args.push_back(args[i]);
}
auto file_args = Args::from_gcc_atfile(argpath);
if (!file_args) {
- log("Couldn't read arg file {}", argpath);
+ LOG("Couldn't read arg file {}", argpath);
return Statistic::bad_compiler_arguments;
}
if (ctx.guessed_compiler == GuessedCompiler::nvcc
&& (args[i] == "-optf" || args[i] == "--options-file")) {
if (i == args.size() - 1) {
- log("Expected argument after {}", args[i]);
+ LOG("Expected argument after {}", args[i]);
return Statistic::bad_compiler_arguments;
}
++i;
for (auto it = paths.rbegin(); it != paths.rend(); ++it) {
auto file_args = Args::from_gcc_atfile(*it);
if (!file_args) {
- log("Couldn't read CUDA options file {}", *it);
+ LOG("Couldn't read CUDA options file {}", *it);
return Statistic::bad_compiler_arguments;
}
// These are always too hard.
if (compopt_too_hard(args[i]) || Util::starts_with(args[i], "-fdump-")
|| Util::starts_with(args[i], "-MJ")) {
- log("Compiler option {} is unsupported", args[i]);
+ LOG("Compiler option {} is unsupported", args[i]);
return Statistic::unsupported_compiler_option;
}
// These are too hard in direct mode.
if (config.direct_mode() && compopt_too_hard_for_direct_mode(args[i])) {
- log("Unsupported compiler option for direct mode: {}", args[i]);
+ LOG("Unsupported compiler option for direct mode: {}", args[i]);
config.set_direct_mode(false);
}
// -Xarch_* options are too hard.
if (Util::starts_with(args[i], "-Xarch_")) {
- log("Unsupported compiler option: {}", args[i]);
+ LOG("Unsupported compiler option: {}", args[i]);
return Statistic::unsupported_compiler_option;
}
|| (ctx.guessed_compiler == GuessedCompiler::nvcc
&& args[i] == "-Werror")) {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
state.compiler_only_args.push_back(args[i + 1]);
// flag.
if (args[i] == "-fmodules") {
if (!config.depend_mode() || !config.direct_mode()) {
- log("Compiler option {} is unsupported without direct depend mode",
+ LOG("Compiler option {} is unsupported without direct depend mode",
args[i]);
return Statistic::could_not_use_modules;
} else if (!(config.sloppiness() & SLOPPY_MODULES)) {
- log(
+ LOG_RAW(
"You have to specify \"modules\" sloppiness when using"
" -fmodules to get hits");
return Statistic::could_not_use_modules;
// input file and strip all -x options from the arguments.
if (args[i].length() == 2) {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
if (args_info.input_file.empty()) {
// We need to work out where the output was meant to go.
if (args[i] == "-o") {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
args_info.output_obj = Util::make_relative_path(ctx, args[i + 1]);
if (separate_argument) {
// -MF arg
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
dep_file = args[i + 1];
if (args[i].size() == 3) {
// -MQ arg or -MT arg
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
state.dep_args.push_back(args[i]);
// Alternate form of specifying sysroot without =
if (args[i] == "--sysroot") {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
state.common_args.push_back(args[i]);
// Alternate form of specifying target without =
if (args[i] == "-target") {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
state.common_args.push_back(args[i]);
// -P removes preprocessor information in such a way that the object file
// from compiling the preprocessed file will not be equal to the object
// file produced when compiling without ccache.
- log("Too hard option -Wp,-P detected");
+ LOG_RAW("Too hard option -Wp,-P detected");
return Statistic::unsupported_compiler_option;
} else if (Util::starts_with(args[i], "-Wp,-MD,")
&& args[i].find(',', 8) == std::string::npos) {
} else if (config.direct_mode()) {
// -Wp, can be used to pass too hard options to the preprocessor.
// Hence, disable direct mode.
- log("Unsupported compiler option for direct mode: {}", args[i]);
+ LOG("Unsupported compiler option for direct mode: {}", args[i]);
config.set_direct_mode(false);
}
if (args[i] == "--serialize-diagnostics") {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
args_info.generating_diagnostics = true;
// among multiple users.
i++;
if (i <= args.size() - 1) {
- log("Skipping argument -index-store-path {}", args[i]);
+ LOG("Skipping argument -index-store-path {}", args[i]);
}
return nullopt;
}
// output produced by the compiler will be normalized.
if (compopt_takes_path(args[i])) {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
// Options that take an argument.
if (compopt_takes_arg(args[i])) {
if (i == args.size() - 1) {
- log("Missing argument to {}", args[i]);
+ LOG("Missing argument to {}", args[i]);
return Statistic::bad_compiler_arguments;
}
if (args[i] != "/dev/null") {
auto st = Stat::stat(args[i]);
if (!st || !st.is_regular()) {
- log("{} is not a regular file, not considering as input file", args[i]);
+ LOG("{} is not a regular file, not considering as input file", args[i]);
state.common_args.push_back(args[i]);
return nullopt;
}
if (!args_info.input_file.empty()) {
if (!language_for_file(args[i]).empty()) {
- log("Multiple input files: {} and {}", args_info.input_file, args[i]);
+ LOG("Multiple input files: {} and {}", args_info.input_file, args[i]);
return Statistic::multiple_source_files;
} else if (!state.found_c_opt && !state.found_dc_opt) {
- log("Called for link with {}", args[i]);
+ LOG("Called for link with {}", args[i]);
if (args[i].find("conftest.") != std::string::npos) {
return Statistic::autoconf_test;
} else {
return Statistic::called_for_link;
}
} else {
- log("Unsupported source extension: {}", args[i]);
+ LOG("Unsupported source extension: {}", args[i]);
return Statistic::unsupported_source_language;
}
}
}
if (state.generating_debuginfo_level_3 && !config.run_second_cpp()) {
- log("Generating debug info level 3; not compiling preprocessed code");
+ LOG_RAW("Generating debug info level 3; not compiling preprocessed code");
config.set_run_second_cpp(true);
}
handle_dependency_environment_variables(ctx, state);
if (args_info.input_file.empty()) {
- log("No input file found");
+ LOG_RAW("No input file found");
return Statistic::no_input_file;
}
if (state.found_pch || state.found_fpch_preprocess) {
args_info.using_precompiled_header = true;
if (!(config.sloppiness() & SLOPPY_TIME_MACROS)) {
- log(
+ LOG_RAW(
"You have to specify \"time_macros\" sloppiness when using"
" precompiled headers to get direct hits");
- log("Disabling direct mode");
+ LOG_RAW("Disabling direct mode");
return Statistic::could_not_use_precompiled_header;
}
}
state.file_language = language_for_file(args_info.input_file);
if (!state.explicit_language.empty()) {
if (!language_is_supported(state.explicit_language)) {
- log("Unsupported language: {}", state.explicit_language);
+ LOG("Unsupported language: {}", state.explicit_language);
return Statistic::unsupported_source_language;
}
args_info.actual_language = state.explicit_language;
if (args_info.output_is_precompiled_header
&& !(config.sloppiness() & SLOPPY_PCH_DEFINES)) {
- log(
+ LOG_RAW(
"You have to specify \"pch_defines,time_macros\" sloppiness when"
" creating precompiled headers");
return Statistic::could_not_use_precompiled_header;
if (args_info.output_is_precompiled_header) {
state.common_args.push_back("-c");
} else {
- log("No -c option found");
+ LOG_RAW("No -c option found");
// Having a separate statistic for autoconf tests is useful, as they are
// the dominant form of "called for link" in many cases.
return args_info.input_file.find("conftest.") != std::string::npos
}
if (args_info.actual_language.empty()) {
- log("Unsupported source extension: {}", args_info.input_file);
+ LOG("Unsupported source extension: {}", args_info.input_file);
return Statistic::unsupported_source_language;
}
if (!config.run_second_cpp() && args_info.actual_language == "cu") {
- log("Using CUDA compiler; not compiling preprocessed code");
+ LOG_RAW("Using CUDA compiler; not compiling preprocessed code");
config.set_run_second_cpp(true);
}
if (args_info.output_is_precompiled_header && !config.run_second_cpp()) {
// It doesn't work to create the .gch from preprocessed source.
- log("Creating precompiled header; not compiling preprocessed code");
+ LOG_RAW("Creating precompiled header; not compiling preprocessed code");
config.set_run_second_cpp(true);
}
// Don't try to second-guess the compiler's heuristics for stdout handling.
if (args_info.output_obj == "-") {
- log("Output file is -");
+ LOG_RAW("Output file is -");
return Statistic::output_to_stdout;
}
if (args_info.seen_split_dwarf) {
size_t pos = args_info.output_obj.rfind('.');
if (pos == std::string::npos || pos == args_info.output_obj.size() - 1) {
- log("Badly formed object filename");
+ LOG_RAW("Badly formed object filename");
return Statistic::bad_compiler_arguments;
}
if (args_info.output_obj != "/dev/null") {
auto st = Stat::stat(args_info.output_obj);
if (st && !st.is_regular()) {
- log("Not a regular file: {}", args_info.output_obj);
+ LOG("Not a regular file: {}", args_info.output_obj);
return Statistic::bad_output_file;
}
}
auto output_dir = std::string(Util::dir_name(args_info.output_obj));
auto st = Stat::stat(output_dir);
if (!st || !st.is_directory()) {
- log("Directory does not exist: {}", output_dir);
+ LOG("Directory does not exist: {}", output_dir);
return Statistic::bad_output_file;
}
#endif
const char CCACHE_NAME[] = MYNAME;
-using Logging::log;
using nonstd::nullopt;
using nonstd::optional;
using nonstd::string_view;
-const char VERSION_TEXT[] =
+constexpr const char VERSION_TEXT[] =
R"({} version {}
Copyright (C) 2002-2007 Andrew Tridgell
version.
)";
-const char USAGE_TEXT[] =
+constexpr const char USAGE_TEXT[] =
R"(Usage:
{} [options]
{} compiler [compiler options]
prefix.push_back(path);
}
- log("Using command-line prefix {}", prefix_command);
+ LOG("Using command-line prefix {}", prefix_command);
for (size_t i = prefix.size(); i != 0; i--) {
args.push_front(prefix[i - 1]);
}
hash.enable_debug(section_name, debug_binary_file.get(), debug_text_file);
ctx.hash_debug_files.push_back(std::move(debug_binary_file));
} else {
- log("Failed to open {}: {}", path, strerror(errno));
+ LOG("Failed to open {}: {}", path, strerror(errno));
}
}
}
if (!st.is_regular()) {
// Device, pipe, socket or other strange creature.
- log("Non-regular include file {}", path);
+ LOG("Non-regular include file {}", path);
return false;
}
// under "Performance" in doc/MANUAL.adoc.
if (!(ctx.config.sloppiness() & SLOPPY_INCLUDE_FILE_MTIME)
&& st.mtime() >= ctx.time_of_compilation) {
- log("Include file {} too new", path);
+ LOG("Include file {} too new", path);
return false;
}
// The same >= logic as above applies to the change time of the file.
if (!(ctx.config.sloppiness() & SLOPPY_INCLUDE_FILE_CTIME)
&& st.ctime() >= ctx.time_of_compilation) {
- log("Include file {} ctime too new", path);
+ LOG("Include file {} ctime too new", path);
return false;
}
is_pch = Util::is_precompiled_header(path);
if (is_pch) {
if (ctx.included_pch_file.empty()) {
- log("Detected use of precompiled header: {}", path);
+ LOG("Detected use of precompiled header: {}", path);
}
bool using_pch_sum = false;
if (ctx.config.pch_external_checksum()) {
if (Stat::stat(pch_sum_path, Stat::OnError::log)) {
path = std::move(pch_sum_path);
using_pch_sum = true;
- log("Using pch.sum file {}", path);
+ LOG("Using pch.sum file {}", path);
}
}
{
if (!do_remember_include_file(ctx, path, cpp_hash, system, depend_mode_hash)
&& ctx.config.direct_mode()) {
- log("Disabling direct mode");
+ LOG_RAW("Disabling direct mode");
ctx.config.set_direct_mode(false);
}
}
}
q++;
if (q >= end) {
- log("Failed to parse included file path");
+ LOG_RAW("Failed to parse included file path");
return false;
}
// q points to the beginning of an include file path
// part of inline assembly, refers to an external file. If the file
// changes, the hash should change as well, but finding out what file to
// hash is too hard for ccache, so just bail out.
- log(
+ LOG_RAW(
"Found unsupported .inc"
"bin directive in source code");
throw Failure(Statistic::unsupported_code_directive);
use_relative_paths_in_depfile(const Context& ctx)
{
if (ctx.config.base_dir().empty()) {
- log("Base dir not set, skip using relative paths");
+ LOG_RAW("Base dir not set, skip using relative paths");
return; // nothing to do
}
if (!ctx.has_absolute_include_headers) {
- log("No absolute path for included files found, skip using relative paths");
+ LOG_RAW(
+ "No absolute path for included files found, skip using relative paths");
return; // nothing to do
}
try {
file_content = Util::read_file(output_dep);
} catch (const Error& e) {
- log("Cannot open dependency file {}: {}", output_dep, e.what());
+ LOG("Cannot open dependency file {}: {}", output_dep, e.what());
return;
}
const auto new_content = rewrite_dep_file_paths(ctx, file_content);
if (new_content) {
Util::write_file(output_dep, *new_content);
} else {
- log("No paths in dependency file {} made relative", output_dep);
+ LOG("No paths in dependency file {} made relative", output_dep);
}
}
try {
file_content = Util::read_file(ctx.args_info.output_dep);
} catch (const Error& e) {
- log(
+ LOG(
"Cannot open dependency file {}: {}", ctx.args_info.output_dep, e.what());
return nullopt;
}
if (errors.find("unrecognized command line option") != std::string::npos
&& errors.find("-fdiagnostics-color") != std::string::npos) {
// Old versions of GCC do not support colored diagnostics.
- log("-fdiagnostics-color is unsupported; trying again without it");
+ LOG_RAW("-fdiagnostics-color is unsupported; trying again without it");
tmp_stdout.fd = Fd(open(
tmp_stdout.path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0600));
if (!tmp_stdout.fd) {
- log("Failed to truncate {}: {}", tmp_stdout.path, strerror(errno));
+ LOG("Failed to truncate {}: {}", tmp_stdout.path, strerror(errno));
throw Failure(Statistic::internal_error);
}
tmp_stderr.fd = Fd(open(
tmp_stderr.path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0600));
if (!tmp_stderr.fd) {
- log("Failed to truncate {}: {}", tmp_stderr.path, strerror(errno));
+ LOG("Failed to truncate {}: {}", tmp_stderr.path, strerror(errno));
throw Failure(Statistic::internal_error);
}
(ctx.config.sloppiness() & SLOPPY_FILE_STAT_MATCHES)
|| ctx.args_info.output_is_precompiled_header;
- log("Adding result name to {}", *ctx.manifest_path());
+ LOG("Adding result name to {}", *ctx.manifest_path());
if (!Manifest::put(ctx.config,
*ctx.manifest_path(),
*ctx.result_name(),
ctx.included_files,
ctx.time_of_compilation,
save_timestamp)) {
- log("Failed to add result name to {}", *ctx.manifest_path());
+ LOG("Failed to add result name to {}", *ctx.manifest_path());
} else {
const auto new_stat = Stat::stat(*ctx.manifest_path(), Stat::OnError::log);
ctx.manifest_counter_updates.increment(
try {
Util::write_file(path, cachedir_tag);
} catch (const Error& e) {
- log("Failed to create {}: {}", path, e.what());
+ LOG("Failed to create {}: {}", path, e.what());
}
}
std::string unmangled_form = Result::gcno_file_in_unmangled_form(ctx);
std::string found_file;
if (Stat::stat(mangled_form)) {
- log("Found coverage file {}", mangled_form);
+ LOG("Found coverage file {}", mangled_form);
found_file = mangled_form;
}
if (Stat::stat(unmangled_form)) {
- log("Found coverage file {}", unmangled_form);
+ LOG("Found coverage file {}", unmangled_form);
if (!found_file.empty()) {
- log("Found two coverage files, cannot continue");
+ LOG_RAW("Found two coverage files, cannot continue");
return {};
}
found_file = unmangled_form;
}
if (found_file.empty()) {
- log("No coverage file found (tried {} and {}), cannot continue",
+ LOG("No coverage file found (tried {} and {}), cannot continue",
unmangled_form,
mangled_form);
return {};
// non-existent .dwo files.
if (unlink(ctx.args_info.output_dwo.c_str()) != 0 && errno != ENOENT
&& errno != ESTALE) {
- log("Failed to unlink {}: {}", ctx.args_info.output_dwo, strerror(errno));
+ LOG("Failed to unlink {}: {}", ctx.args_info.output_dwo, strerror(errno));
throw Failure(Statistic::bad_output_file);
}
}
- log("Running real compiler");
+ LOG_RAW("Running real compiler");
MTR_BEGIN("execute", "compiler");
TemporaryFile tmp_stdout(
// distcc-pump outputs lines like this:
// __________Using # distcc servers in pump mode
if (st.size() != 0 && ctx.guessed_compiler != GuessedCompiler::pump) {
- log("Compiler produced stdout");
+ LOG_RAW("Compiler produced stdout");
throw Failure(Statistic::compiler_produced_stdout);
}
}
if (status != 0) {
- log("Compiler gave exit status {}", status);
+ LOG("Compiler gave exit status {}", status);
// We can output stderr immediately instead of rerunning the compiler.
Util::send_to_stderr(ctx, Util::read_file(tmp_stderr_path));
const auto obj_stat = Stat::stat(ctx.args_info.output_obj);
if (!obj_stat) {
- log("Compiler didn't produce an object file");
+ LOG_RAW("Compiler didn't produce an object file");
throw Failure(Statistic::compiler_produced_no_output);
}
if (obj_stat.size() == 0) {
- log("Compiler produced an empty object file");
+ LOG_RAW("Compiler produced an empty object file");
throw Failure(Statistic::compiler_produced_empty_output);
}
auto error = result_writer.finalize();
if (error) {
- log("Error: {}", *error);
+ LOG("Error: {}", *error);
} else {
- log("Stored in cache: {}", result_file.path);
+ LOG("Stored in cache: {}", result_file.path);
}
auto new_result_stat = Stat::stat(result_file.path, Stat::OnError::log);
}
args.push_back(ctx.args_info.input_file);
add_prefix(ctx, args, ctx.config.prefix_command_cpp());
- log("Running preprocessor");
+ LOG_RAW("Running preprocessor");
MTR_BEGIN("execute", "preprocessor");
status =
do_execute(ctx, args, std::move(tmp_stdout), std::move(tmp_stderr));
}
if (status != 0) {
- log("Preprocessor gave exit status {}", status);
+ LOG("Preprocessor gave exit status {}", status);
throw Failure(Statistic::preprocessor_error);
}
hash.hash_delimiter("cppstderr");
if (!ctx.args_info.direct_i_file && !hash.hash_file(stderr_path)) {
// Somebody removed the temporary file?
- log("Failed to open {}: {}", stderr_path, strerror(errno));
+ LOG("Failed to open {}: {}", stderr_path, strerror(errno));
throw Failure(Statistic::internal_error);
}
} else { // command string
if (!hash_multicommand_output(
hash, ctx.config.compiler_check(), ctx.orig_args[0])) {
- log("Failure running compiler check command: {}",
+ LOG("Failure running compiler check command: {}",
ctx.config.compiler_check());
throw Failure(Statistic::compiler_check_failed);
}
if (sep_pos != std::string::npos) {
std::string old_path = map.substr(0, sep_pos);
std::string new_path = map.substr(sep_pos + 1);
- log("Relocating debuginfo from {} to {} (CWD: {})",
+ LOG("Relocating debuginfo from {} to {} (CWD: {})",
old_path,
new_path,
ctx.apparent_cwd);
}
}
}
- log("Hashing CWD {}", dir_to_hash);
+ LOG("Hashing CWD {}", dir_to_hash);
hash.hash_delimiter("cwd");
hash.hash(dir_to_hash);
}
string_view stem =
Util::remove_extension(Util::base_name(ctx.args_info.output_obj));
std::string gcda_path = fmt::format("{}/{}.gcda", dir, stem);
- log("Hashing coverage path {}", gcda_path);
+ LOG("Hashing coverage path {}", gcda_path);
hash.hash_delimiter("gcda");
hash.hash(gcda_path);
}
// Possibly hash the sanitize blacklist file path.
for (const auto& sanitize_blacklist : args_info.sanitize_blacklists) {
- log("Hashing sanitize blacklist {}", sanitize_blacklist);
+ LOG("Hashing sanitize blacklist {}", sanitize_blacklist);
hash.hash("sanitizeblacklist");
if (!hash_binary_file(ctx, hash, sanitize_blacklist)) {
throw Failure(Statistic::error_hashing_extra_file);
if (!ctx.config.extra_files_to_hash().empty()) {
for (const std::string& path : Util::split_into_strings(
ctx.config.extra_files_to_hash(), PATH_DELIM)) {
- log("Hashing extra file {}", path);
+ LOG("Hashing extra file {}", path);
hash.hash_delimiter("extrafile");
if (!hash_binary_file(ctx, hash, path)) {
throw Failure(Statistic::error_hashing_extra_file);
bool found = false;
for (const std::string& p : paths_to_try) {
- log("Checking for profile data file {}", p);
+ LOG("Checking for profile data file {}", p);
auto st = Stat::stat(p);
if (st && !st.is_directory()) {
- log("Adding profile data {} to the hash", p);
+ LOG("Adding profile data {} to the hash", p);
hash.hash_delimiter("-fprofile-use");
if (hash_binary_file(ctx, hash, p)) {
found = true;
for (size_t i = 1; i < args.size(); i++) {
// Trust the user if they've said we should not hash a given option.
if (option_should_be_ignored(args[i], ctx.ignore_options())) {
- log("Not hashing ignored option: {}", args[i]);
+ LOG("Not hashing ignored option: {}", args[i]);
if (i + 1 < args.size() && compopt_takes_arg(args[i])) {
i++;
- log("Not hashing argument of ignored option: {}", args[i]);
+ LOG("Not hashing argument of ignored option: {}", args[i]);
}
continue;
}
if (ctx.args_info.profile_generate) {
ASSERT(!ctx.args_info.profile_path.empty());
- log("Adding profile directory {} to our hash", ctx.args_info.profile_path);
+ LOG("Adding profile directory {} to our hash", ctx.args_info.profile_path);
hash.hash_delimiter("-fprofile-dir");
hash.hash(ctx.args_info.profile_path);
}
if (ctx.args_info.profile_use && !hash_profile_data_file(ctx, hash)) {
- log("No profile data file found");
+ LOG_RAW("No profile data file found");
throw Failure(Statistic::no_input_file);
}
throw Failure(Statistic::internal_error);
}
if (result & HASH_SOURCE_CODE_FOUND_TIME) {
- log("Disabling direct mode");
+ LOG_RAW("Disabling direct mode");
ctx.config.set_direct_mode(false);
return nullopt;
}
ctx.set_manifest_path(manifest_file.path);
if (manifest_file.stat) {
- log("Looking for result name in {}", manifest_file.path);
+ LOG("Looking for result name in {}", manifest_file.path);
MTR_BEGIN("manifest", "manifest_get");
result_name = Manifest::get(ctx, manifest_file.path);
MTR_END("manifest", "manifest_get");
if (result_name) {
- log("Got result name from manifest");
+ LOG_RAW("Got result name from manifest");
} else {
- log("Did not find result name in manifest");
+ LOG_RAW("Did not find result name in manifest");
}
} else {
- log("No manifest with name {} in the cache", manifest_name.to_string());
+ LOG("No manifest with name {} in the cache", manifest_name.to_string());
}
} else {
if (ctx.args_info.arch_args.empty()) {
result_name = get_result_name_from_cpp(ctx, preprocessor_args, hash);
- log("Got result name from preprocessor");
+ LOG_RAW("Got result name from preprocessor");
} else {
preprocessor_args.push_back("-arch");
for (size_t i = 0; i < ctx.args_info.arch_args.size(); ++i) {
preprocessor_args.push_back(ctx.args_info.arch_args[i]);
result_name = get_result_name_from_cpp(ctx, preprocessor_args, hash);
- log("Got result name from preprocessor with -arch {}",
+ LOG("Got result name from preprocessor with -arch {}",
ctx.args_info.arch_args[i]);
if (i != ctx.args_info.arch_args.size() - 1) {
result_name = nullopt;
|| ctx.guessed_compiler == GuessedCompiler::unknown)
&& ctx.args_info.output_is_precompiled_header
&& !ctx.args_info.fno_pch_timestamp && mode == FromCacheCallMode::cpp) {
- log("Not considering cached precompiled header in preprocessor mode");
+ LOG_RAW("Not considering cached precompiled header in preprocessor mode");
return nullopt;
}
const auto result_file = look_up_cache_file(
ctx.config.cache_dir(), *ctx.result_name(), Result::k_file_suffix);
if (!result_file.stat) {
- log("No result with name {} in the cache", ctx.result_name()->to_string());
+ LOG("No result with name {} in the cache", ctx.result_name()->to_string());
return nullopt;
}
ctx.set_result_path(result_file.path);
auto error = result_reader.read(result_retriever);
MTR_END("cache", "from_cache");
if (error) {
- log("Failed to get result from cache: {}", *error);
+ LOG("Failed to get result from cache: {}", *error);
return nullopt;
}
// Update modification timestamp to save file from LRU cleanup.
Util::update_mtime(*ctx.result_path());
- log("Succeeded getting cached result");
+ LOG_RAW("Succeeded getting cached result");
return mode == FromCacheCallMode::direct ? Statistic::direct_cache_hit
: Statistic::preprocessed_cache_hit;
ctx.original_umask = umask(ctx.config.umask());
}
- log("=== CCACHE {} STARTED =========================================",
+ LOG("=== CCACHE {} STARTED =========================================",
CCACHE_VERSION);
if (getenv("CCACHE_INTERNAL_TRACE")) {
#ifdef MTR_ENABLED
ctx.mini_trace = std::make_unique<MiniTrace>(ctx.args_info);
#else
- log("Error: tracing is not enabled!");
+ LOG_RAW("Error: tracing is not enabled!");
#endif
}
}
int uncached_fd =
dup(STDERR_FILENO); // The file descriptor is intentionally leaked.
if (uncached_fd == -1) {
- log("dup(2) failed: {}", strerror(errno));
+ LOG("dup(2) failed: {}", strerror(errno));
throw Failure(Statistic::internal_error);
}
const std::string& value,
const std::string& origin)
{
- Logging::bulk_log("Config: ({}) {} = {}", origin, key, value);
+ BULK_LOG("Config: ({}) {} = {}", origin, key, value);
}
static void
ctx.config.cache_dir(), wanted_level, name.to_string() + file_suffix);
if (current_path != wanted_path) {
Util::ensure_dir_exists(Util::dir_name(wanted_path));
- log("Moving {} to {}", current_path, wanted_path);
+ LOG("Moving {} to {}", current_path, wanted_path);
try {
Util::rename(current_path, wanted_path);
} catch (const Error&) {
if (config.disable()) {
// Just log result, don't update statistics.
- log("Result: disabled");
+ LOG_RAW("Result: disabled");
return;
}
if (!config.log_file().empty() || config.debug()) {
const auto result = Statistics::get_result(ctx.counter_updates);
if (result) {
- log("Result: {}", *result);
+ LOG("Result: {}", *result);
}
}
if (config.max_files() != 0
&& counters->get(Statistic::files_in_cache) > config.max_files() / 16) {
- log("Need to clean up {} since it holds {} files (limit: {} files)",
+ LOG("Need to clean up {} since it holds {} files (limit: {} files)",
subdir,
counters->get(Statistic::files_in_cache),
config.max_files() / 16);
if (config.max_size() != 0
&& counters->get(Statistic::cache_size_kibibyte)
> config.max_size() / 1024 / 16) {
- log("Need to clean up {} since it holds {} KiB (limit: {} KiB)",
+ LOG("Need to clean up {} since it holds {} KiB (limit: {} KiB)",
subdir,
counters->get(Statistic::cache_size_kibibyte),
config.max_size() / 1024 / 16);
finalize_stats_and_trigger_cleanup(ctx);
} catch (const ErrorBase& e) {
// finalize_at_exit must not throw since it's called by a destructor.
- log("Error while finalizing stats: {}", e.what());
+ LOG("Error while finalizing stats: {}", e.what());
}
// Dump log buffer last to not lose any logs.
ctx.orig_args.erase_with_prefix("--ccache-");
add_prefix(ctx, ctx.orig_args, ctx.config.prefix_command());
- log("Failed; falling back to running the real compiler");
+ LOG_RAW("Failed; falling back to running the real compiler");
saved_orig_args = std::move(ctx.orig_args);
auto execv_argv = saved_orig_args.to_argv();
- log("Executing {}", Util::format_argv_for_logging(execv_argv.data()));
+ LOG("Executing {}", Util::format_argv_for_logging(execv_argv.data()));
// Run execv below after ctx and finalizer have been destructed.
}
}
do_cache_compilation(Context& ctx, const char* const* argv)
{
if (ctx.actual_cwd.empty()) {
- log("Unable to determine current working directory: {}", strerror(errno));
+ LOG("Unable to determine current working directory: {}", strerror(errno));
throw Failure(Statistic::internal_error);
}
}
if (ctx.config.disable()) {
- log("ccache is disabled");
+ LOG_RAW("ccache is disabled");
// Statistic::cache_miss is a dummy to trigger stats_flush.
throw Failure(Statistic::cache_miss);
}
set_up_uncached_err();
MTR_END("main", "set_up_uncached_err");
- log("Command line: {}", Util::format_argv_for_logging(argv));
- log("Hostname: {}", Util::get_hostname());
- log("Working directory: {}", ctx.actual_cwd);
+ LOG("Command line: {}", Util::format_argv_for_logging(argv));
+ LOG("Hostname: {}", Util::get_hostname());
+ LOG("Working directory: {}", ctx.actual_cwd);
if (ctx.apparent_cwd != ctx.actual_cwd) {
- log("Apparent working directory: {}", ctx.apparent_cwd);
+ LOG("Apparent working directory: {}", ctx.apparent_cwd);
}
ctx.config.set_limit_multiple(
&& (!ctx.args_info.generating_dependencies
|| ctx.args_info.output_dep == "/dev/null"
|| !ctx.config.run_second_cpp())) {
- log("Disabling depend mode");
+ LOG_RAW("Disabling depend mode");
ctx.config.set_depend_mode(false);
}
- log("Source file: {}", ctx.args_info.input_file);
+ LOG("Source file: {}", ctx.args_info.input_file);
if (ctx.args_info.generating_dependencies) {
- log("Dependency file: {}", ctx.args_info.output_dep);
+ LOG("Dependency file: {}", ctx.args_info.output_dep);
}
if (ctx.args_info.generating_coverage) {
- log("Coverage file is being generated");
+ LOG_RAW("Coverage file is being generated");
}
if (ctx.args_info.generating_stackusage) {
- log("Stack usage file: {}", ctx.args_info.output_su);
+ LOG("Stack usage file: {}", ctx.args_info.output_su);
}
if (ctx.args_info.generating_diagnostics) {
- log("Diagnostics file: {}", ctx.args_info.output_dia);
+ LOG("Diagnostics file: {}", ctx.args_info.output_dia);
}
if (!ctx.args_info.output_dwo.empty()) {
- log("Split dwarf file: {}", ctx.args_info.output_dwo);
+ LOG("Split dwarf file: {}", ctx.args_info.output_dwo);
}
- log("Object file: {}", ctx.args_info.output_obj);
+ LOG("Object file: {}", ctx.args_info.output_obj);
MTR_META_THREAD_NAME(ctx.args_info.output_obj.c_str());
if (ctx.config.debug()) {
if (debug_text_file) {
ctx.hash_debug_files.push_back(std::move(debug_text_file));
} else {
- log("Failed to open {}: {}", path, strerror(errno));
+ LOG("Failed to open {}: {}", path, strerror(errno));
}
}
optional<Digest> result_name;
optional<Digest> result_name_from_manifest;
if (ctx.config.direct_mode()) {
- log("Trying direct lookup");
+ LOG_RAW("Trying direct lookup");
MTR_BEGIN("hash", "direct_hash");
Args dummy_args;
result_name =
}
if (ctx.config.read_only_direct()) {
- log("Read-only direct mode; running real compiler");
+ LOG_RAW("Read-only direct mode; running real compiler");
throw Failure(Statistic::cache_miss);
}
// The best thing here would probably be to remove the hash entry from
// the manifest. For now, we use a simpler method: just remove the
// manifest file.
- log("Hash from manifest doesn't match preprocessor output");
- log("Likely reason: different CCACHE_BASEDIRs used");
- log("Removing manifest as a safety measure");
+ LOG_RAW("Hash from manifest doesn't match preprocessor output");
+ LOG_RAW("Likely reason: different CCACHE_BASEDIRs used");
+ LOG_RAW("Removing manifest as a safety measure");
Util::unlink_safe(*ctx.manifest_path());
put_result_in_manifest = true;
}
if (ctx.config.read_only()) {
- log("Read-only mode; running real compiler");
+ LOG_RAW("Read-only mode; running real compiler");
throw Failure(Statistic::cache_miss);
}
#include <algorithm>
-using Logging::log;
-
static void
delete_file(const std::string& path,
uint64_t size,
{
bool deleted = Util::unlink_safe(path, Util::UnlinkLog::ignore_failure);
if (!deleted && errno != ENOENT && errno != ESTALE) {
- log("Failed to unlink {} ({})", path, strerror(errno));
+ LOG("Failed to unlink {} ({})", path, strerror(errno));
} else if (cache_size && files_in_cache) {
// The counters are intentionally subtracted even if there was no file to
// delete since the final cache size calculation will be incorrect if they
uint64_t max_age,
const Util::ProgressReceiver& progress_receiver)
{
- log("Cleaning up cache directory {}", subdir);
+ LOG("Cleaning up cache directory {}", subdir);
std::vector<std::shared_ptr<CacheFile>> files;
Util::get_level_1_files(
return f1->lstat().mtime() < f2->lstat().mtime();
});
- log("Before cleanup: {:.0f} KiB, {:.0f} files",
+ LOG("Before cleanup: {:.0f} KiB, {:.0f} files",
static_cast<double>(cache_size) / 1024,
static_cast<double>(files_in_cache));
cleaned = true;
}
- log("After cleanup: {:.0f} KiB, {:.0f} files",
+ LOG("After cleanup: {:.0f} KiB, {:.0f} files",
static_cast<double>(cache_size) / 1024,
static_cast<double>(files_in_cache));
if (cleaned) {
- log("Cleaned up cache directory {}", subdir);
+ LOG("Cleaned up cache directory {}", subdir);
}
update_counters(subdir, files_in_cache, cache_size, cleaned);
wipe_dir(const std::string& subdir,
const Util::ProgressReceiver& progress_receiver)
{
- log("Clearing out cache directory {}", subdir);
+ LOG("Clearing out cache directory {}", subdir);
std::vector<std::shared_ptr<CacheFile>> files;
Util::get_level_1_files(
const bool cleared = !files.empty();
if (cleared) {
- log("Cleared out cache directory {}", subdir);
+ LOG("Cleared out cache directory {}", subdir);
}
update_counters(subdir, 0, 0, cleared);
}
#include <string>
#include <thread>
-using Logging::log;
using nonstd::optional;
namespace {
return;
}
- log("Recompressing {} to {}",
+ LOG("Recompressing {} to {}",
cache_file.path(),
level ? fmt::format("level {}", wanted_level) : "uncompressed");
AtomicFile atomic_new_file(cache_file.path(), AtomicFile::Mode::binary);
statistics.update(content_size, old_stat.size(), new_stat.size(), 0);
- log("Recompression of {} done", cache_file.path());
+ LOG("Recompression of {} done", cache_file.path());
}
} // namespace
# include "Win32Util.hpp"
#endif
-using Logging::log;
using nonstd::string_view;
#ifdef _WIN32
}
if (ret == 0) {
DWORD error = GetLastError();
- log("failed to execute {}: {} ({})",
+ LOG("failed to execute {}: {} ({})",
full_path,
Win32Util::error_message(error),
error);
int
execute(const char* const* argv, Fd&& fd_out, Fd&& fd_err, pid_t* pid)
{
- log("Executing {}", Util::format_argv_for_logging(argv));
+ LOG("Executing {}", Util::format_argv_for_logging(argv));
{
SignalHandlerBlocker signal_handler_blocker;
path = getenv("PATH");
}
if (path.empty()) {
- log("No PATH variable");
+ LOG_RAW("No PATH variable");
return {};
}
# include <immintrin.h>
#endif
-using Logging::log;
using nonstd::string_view;
namespace {
hash.hash(str);
if (result & HASH_SOURCE_CODE_FOUND_DATE) {
- log("Found __DATE__ in {}", path);
+ LOG("Found __DATE__ in {}", path);
// Make sure that the hash sum changes if the (potential) expansion of
// __DATE__ changes.
// not very useful since the chance that we get a cache hit later the same
// second should be quite slim... So, just signal back to the caller that
// __TIME__ has been found so that the direct mode can be disabled.
- log("Found __TIME__ in {}", path);
+ LOG("Found __TIME__ in {}", path);
}
if (result & HASH_SOURCE_CODE_FOUND_TIMESTAMP) {
- log("Found __TIMESTAMP__ in {}", path);
+ LOG("Found __TIMESTAMP__ in {}", path);
// Make sure that the hash sum changes if the (potential) expansion of
// __TIMESTAMP__ changes.
}
auto argv = args.to_argv();
- log("Executing compiler check command {}",
+ LOG("Executing compiler check command {}",
Util::format_argv_for_logging(argv.data()));
#ifdef _WIN32
int fd = _open_osfhandle((intptr_t)pipe_out[0], O_BINARY);
bool ok = hash.hash_fd(fd);
if (!ok) {
- log("Error hashing compiler check command output: {}", strerror(errno));
+ LOG("Error hashing compiler check command output: {}", strerror(errno));
}
WaitForSingleObject(pi.hProcess, INFINITE);
DWORD exitcode;
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
if (exitcode != 0) {
- log("Compiler check command returned {}", exitcode);
+ LOG("Compiler check command returned {}", exitcode);
return false;
}
return ok;
close(pipefd[1]);
bool ok = hash.hash_fd(pipefd[0]);
if (!ok) {
- log("Error hashing compiler check command output: {}", strerror(errno));
+ LOG("Error hashing compiler check command output: {}", strerror(errno));
}
close(pipefd[0]);
if (result == -1 && errno == EINTR) {
continue;
}
- log("waitpid failed: {}", strerror(errno));
+ LOG("waitpid failed: {}", strerror(errno));
return false;
}
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- log("Compiler check command returned {}", WEXITSTATUS(status));
+ LOG("Compiler check command returned {}", WEXITSTATUS(status));
return false;
}
return ok;
value: 999999
- key: readability-function-size.ParameterThreshold
value: 7
- - key: readability-function-size.NestingThreshold
-   value: 6
+ - key: readability-function-size.NestingThreshold
+   value: 999999
- key: readability-function-size.VariableThreshold