From 836abffe5eb34485e755a6976815656d28984e92 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 28 May 2019 14:58:03 +0200 Subject: [PATCH 001/127] Use GoogleTest instead of our own framework Fixes #1248. --- .github/workflows/macos.yml | 2 +- CMakeLists.txt | 13 ++- appveyor.yml | 4 - configure.py | 35 -------- src/includes_normalize_test.cc | 4 +- src/ninja_test.cc | 148 +-------------------------------- src/test.h | 87 +------------------ 7 files changed, 20 insertions(+), 273 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 411cfe1405..e32bb87409 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -21,7 +21,7 @@ jobs: env: MACOSX_DEPLOYMENT_TARGET: 10.12 run: | - cmake -DCMAKE_BUILD_TYPE=Release -B build + CXXFLAGS=-std=c++11 cmake -DCMAKE_BUILD_TYPE=Release -B build cmake --build build --parallel --config Release - name: Test ninja diff --git a/CMakeLists.txt b/CMakeLists.txt index 7f03c3599c..341eb1c52f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -163,6 +163,17 @@ endif() include(CTest) if(BUILD_TESTING) + find_package(GTest) + if(NOT GTest_FOUND) + include(FetchContent) + FetchContent_Declare( + googletest + URL https://github.com/google/googletest/archive/release-1.10.0.tar.gz + URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770 + ) + FetchContent_MakeAvailable(googletest) + endif() + # Tests all build into ninja_test executable. 
add_executable(ninja_test src/build_log_test.cc @@ -187,7 +198,7 @@ if(BUILD_TESTING) if(WIN32) target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc) endif() - target_link_libraries(ninja_test PRIVATE libninja libninja-re2c) + target_link_libraries(ninja_test PRIVATE libninja libninja-re2c gtest) foreach(perftest build_log_perftest diff --git a/appveyor.yml b/appveyor.yml index f0b92b8e78..7859e97111 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -30,7 +30,6 @@ for: pacman -S --quiet --noconfirm --needed re2c 2>&1\n ./configure.py --bootstrap --platform mingw 2>&1\n ./ninja all\n - ./ninja_test 2>&1\n ./misc/ninja_syntax_test.py 2>&1\n\"@" - matrix: @@ -44,8 +43,6 @@ for: ninja.bootstrap.exe all - ninja_test - python misc/ninja_syntax_test.py - matrix: @@ -54,7 +51,6 @@ for: build_script: - ./configure.py --bootstrap - ./ninja all - - ./ninja_test - misc/ninja_syntax_test.py - misc/output_test.py diff --git a/configure.py b/configure.py index cded265742..ed56faba44 100755 --- a/configure.py +++ b/configure.py @@ -559,41 +559,6 @@ def has_re2c(): # build.ninja file. 
n = ninja_writer -n.comment('Tests all build into ninja_test executable.') - -objs = [] -if platform.is_msvc(): - cxxvariables = [('pdb', 'ninja_test.pdb')] - -for name in ['build_log_test', - 'build_test', - 'clean_test', - 'clparser_test', - 'depfile_parser_test', - 'deps_log_test', - 'dyndep_parser_test', - 'disk_interface_test', - 'edit_distance_test', - 'graph_test', - 'lexer_test', - 'manifest_parser_test', - 'ninja_test', - 'state_test', - 'string_piece_util_test', - 'subprocess_test', - 'test', - 'util_test']: - objs += cxx(name, variables=cxxvariables) -if platform.is_windows(): - for name in ['includes_normalize_test', 'msvc_helper_test']: - objs += cxx(name, variables=cxxvariables) - -ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib, - variables=[('libs', libs)]) -n.newline() -all_targets += ninja_test - - n.comment('Ancillary executables.') if platform.is_aix() and '-maix64' not in ldflags: diff --git a/src/includes_normalize_test.cc b/src/includes_normalize_test.cc index 9214f53495..12965f9fb1 100644 --- a/src/includes_normalize_test.cc +++ b/src/includes_normalize_test.cc @@ -117,10 +117,10 @@ TEST(IncludesNormalize, LongInvalidPath) { // Construct max size path having cwd prefix. // kExactlyMaxPath = "$cwd\\a\\aaaa...aaaa\0"; char kExactlyMaxPath[_MAX_PATH + 1]; - ASSERT_NE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL); + ASSERT_STRNE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL); int cwd_len = strlen(kExactlyMaxPath); - ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH) + ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH); kExactlyMaxPath[cwd_len] = '\\'; kExactlyMaxPath[cwd_len + 1] = 'a'; kExactlyMaxPath[cwd_len + 2] = '\\'; diff --git a/src/ninja_test.cc b/src/ninja_test.cc index b40e1769c6..7616c85bcd 100644 --- a/src/ninja_test.cc +++ b/src/ninja_test.cc @@ -12,151 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include -#include -#include - -#ifdef _WIN32 -#include "getopt.h" -#elif defined(_AIX) -#include "getopt.h" -#include -#else -#include -#endif - -#include "test.h" -#include "line_printer.h" - -using namespace std; - -struct RegisteredTest { - testing::Test* (*factory)(); - const char *name; - bool should_run; -}; -// This can't be a vector because tests call RegisterTest from static -// initializers and the order static initializers run it isn't specified. So -// the vector constructor isn't guaranteed to run before all of the -// RegisterTest() calls. -static RegisteredTest tests[10000]; -testing::Test* g_current_test; -static int ntests; -static LinePrinter printer; - -void RegisterTest(testing::Test* (*factory)(), const char* name) { - tests[ntests].factory = factory; - tests[ntests++].name = name; -} - -namespace { -string StringPrintf(const char* format, ...) { - const int N = 1024; - char buf[N]; - - va_list ap; - va_start(ap, format); - vsnprintf(buf, N, format, ap); - va_end(ap); - - return buf; -} - -void Usage() { - fprintf(stderr, -"usage: ninja_tests [options]\n" -"\n" -"options:\n" -" --gtest_filter=POSTIVE_PATTERN[-NEGATIVE_PATTERN]\n" -" Run tests whose names match the positive but not the negative pattern.\n" -" '*' matches any substring. (gtest's ':', '?' are not implemented).\n"); -} - -bool PatternMatchesString(const char* pattern, const char* str) { - switch (*pattern) { - case '\0': - case '-': return *str == '\0'; - case '*': return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || - PatternMatchesString(pattern + 1, str); - default: return *pattern == *str && - PatternMatchesString(pattern + 1, str + 1); - } -} - -bool TestMatchesFilter(const char* test, const char* filter) { - // Split --gtest_filter at '-' into positive and negative filters. - const char* const dash = strchr(filter, '-'); - const char* pos = dash == filter ? "*" : filter; //Treat '-test1' as '*-test1' - const char* neg = dash ? 
dash + 1 : ""; - return PatternMatchesString(pos, test) && !PatternMatchesString(neg, test); -} - -bool ReadFlags(int* argc, char*** argv, const char** test_filter) { - enum { OPT_GTEST_FILTER = 1 }; - const option kLongOptions[] = { - { "gtest_filter", required_argument, NULL, OPT_GTEST_FILTER }, - { NULL, 0, NULL, 0 } - }; - - int opt; - while ((opt = getopt_long(*argc, *argv, "h", kLongOptions, NULL)) != -1) { - switch (opt) { - case OPT_GTEST_FILTER: - if (strchr(optarg, '?') == NULL && strchr(optarg, ':') == NULL) { - *test_filter = optarg; - break; - } // else fall through. - default: - Usage(); - return false; - } - } - *argv += optind; - *argc -= optind; - return true; -} - -} // namespace - -bool testing::Test::Check(bool condition, const char* file, int line, - const char* error) { - if (!condition) { - printer.PrintOnNewLine( - StringPrintf("*** Failure in %s:%d\n%s\n", file, line, error)); - failed_ = true; - } - return condition; -} +#include int main(int argc, char **argv) { - int tests_started = 0; - - const char* test_filter = "*"; - if (!ReadFlags(&argc, &argv, &test_filter)) - return 1; - - int nactivetests = 0; - for (int i = 0; i < ntests; i++) - if ((tests[i].should_run = TestMatchesFilter(tests[i].name, test_filter))) - ++nactivetests; - - bool passed = true; - for (int i = 0; i < ntests; i++) { - if (!tests[i].should_run) continue; - - ++tests_started; - testing::Test* test = tests[i].factory(); - printer.Print( - StringPrintf("[%d/%d] %s", tests_started, nactivetests, tests[i].name), - LinePrinter::ELIDE); - test->SetUp(); - test->Run(); - test->TearDown(); - if (test->Failed()) - passed = false; - delete test; - } - - printer.PrintOnNewLine(passed ? "passed\n" : "failed\n"); - return passed ? 
EXIT_SUCCESS : EXIT_FAILURE; + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/src/test.h b/src/test.h index 4552c34c88..3565c382ce 100644 --- a/src/test.h +++ b/src/test.h @@ -15,94 +15,11 @@ #ifndef NINJA_TEST_H_ #define NINJA_TEST_H_ +#include + #include "disk_interface.h" #include "manifest_parser.h" #include "state.h" -#include "util.h" - -// A tiny testing framework inspired by googletest, but much simpler and -// faster to compile. It supports most things commonly used from googltest. The -// most noticeable things missing: EXPECT_* and ASSERT_* don't support -// streaming notes to them with operator<<, and for failing tests the lhs and -// rhs are not printed. That's so that this header does not have to include -// sstream, which slows down building ninja_test almost 20%. -namespace testing { -class Test { - bool failed_; - int assertion_failures_; - public: - Test() : failed_(false), assertion_failures_(0) {} - virtual ~Test() {} - virtual void SetUp() {} - virtual void TearDown() {} - virtual void Run() = 0; - - bool Failed() const { return failed_; } - int AssertionFailures() const { return assertion_failures_; } - void AddAssertionFailure() { assertion_failures_++; } - bool Check(bool condition, const char* file, int line, const char* error); -}; -} - -void RegisterTest(testing::Test* (*)(), const char*); - -extern testing::Test* g_current_test; -#define TEST_F_(x, y, name) \ - struct y : public x { \ - static testing::Test* Create() { return g_current_test = new y; } \ - virtual void Run(); \ - }; \ - struct Register##y { \ - Register##y() { RegisterTest(y::Create, name); } \ - }; \ - Register##y g_register_##y; \ - void y::Run() - -#define TEST_F(x, y) TEST_F_(x, x##y, #x "." #y) -#define TEST(x, y) TEST_F_(testing::Test, x##y, #x "." 
#y) - -#define EXPECT_EQ(a, b) \ - g_current_test->Check(a == b, __FILE__, __LINE__, #a " == " #b) -#define EXPECT_NE(a, b) \ - g_current_test->Check(a != b, __FILE__, __LINE__, #a " != " #b) -#define EXPECT_GT(a, b) \ - g_current_test->Check(a > b, __FILE__, __LINE__, #a " > " #b) -#define EXPECT_LT(a, b) \ - g_current_test->Check(a < b, __FILE__, __LINE__, #a " < " #b) -#define EXPECT_GE(a, b) \ - g_current_test->Check(a >= b, __FILE__, __LINE__, #a " >= " #b) -#define EXPECT_LE(a, b) \ - g_current_test->Check(a <= b, __FILE__, __LINE__, #a " <= " #b) -#define EXPECT_TRUE(a) \ - g_current_test->Check(static_cast(a), __FILE__, __LINE__, #a) -#define EXPECT_FALSE(a) \ - g_current_test->Check(!static_cast(a), __FILE__, __LINE__, #a) - -#define ASSERT_EQ(a, b) \ - if (!EXPECT_EQ(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_NE(a, b) \ - if (!EXPECT_NE(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_GT(a, b) \ - if (!EXPECT_GT(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_LT(a, b) \ - if (!EXPECT_LT(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_GE(a, b) \ - if (!EXPECT_GE(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_LE(a, b) \ - if (!EXPECT_LE(a, b)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_TRUE(a) \ - if (!EXPECT_TRUE(a)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_FALSE(a) \ - if (!EXPECT_FALSE(a)) { g_current_test->AddAssertionFailure(); return; } -#define ASSERT_NO_FATAL_FAILURE(a) \ - { \ - int fail_count = g_current_test->AssertionFailures(); \ - a; \ - if (fail_count != g_current_test->AssertionFailures()) { \ - g_current_test->AddAssertionFailure(); \ - return; \ - } \ - } // Support utilities for tests. 
From 87b8965080693f084e5784efde234b127711d8c1 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Mon, 26 Jul 2021 16:19:37 -0700 Subject: [PATCH 002/127] Improve error messages when VS cannot be found The first experience for most developers who start working on ninja is this cryptic error message: bootstrapping ninja... Traceback (most recent call last): File "configure.py", line 329, in if platform.msvc_needs_fs(): File "configure.py", line 89, in msvc_needs_fs stderr=subprocess.PIPE) File "python\bin\lib\subprocess.py", line 394, in __init__ errread, errwrite) File "python\bin\lib\subprocess.py", line 644, in _execute_child startupinfo) WindowsError: [Error 2] The system cannot find the file specified This message happens when bootstrap.py first tries to invoke cl.exe and it cannot be found. This change looks for cl.exe and warns if it is not in the user's path, leading to this friendlier message: bootstrapping ninja... Traceback (most recent call last): File "configure.py", line 317, in raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS') Exception: cl.exe not found. Run again from the Developer Command Prompt for VS --- configure.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/configure.py b/configure.py index 4ca78fb404..c0d67127b5 100755 --- a/configure.py +++ b/configure.py @@ -305,7 +305,16 @@ def binary(name): else: n.variable('ar', configure_env.get('AR', 'ar')) +def SearchPath(exe_name): + """Find an executable (.exe, .bat, whatever) in the system path.""" + for dir in os.environ['path'].split(';'): + path = os.path.join(dir, exe_name) + if os.path.exists(path): + return path + if platform.is_msvc(): + if not SearchPath('cl.exe'): + raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS') cflags = ['/showIncludes', '/nologo', # Don't print startup banner. '/Zi', # Create pdb with debug info. 
From 4af9fc5c51040944178b112e6defda8473d40105 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Sun, 21 Sep 2014 16:10:54 -0700 Subject: [PATCH 003/127] support explicit build order --- src/build.cc | 189 ++++++++++++++++++++++++++++++++++++++++++++++++++- src/build.h | 5 ++ src/graph.h | 12 +++- 3 files changed, 200 insertions(+), 6 deletions(-) diff --git a/src/build.cc b/src/build.cc index cf078461e8..8f23e9632c 100644 --- a/src/build.cc +++ b/src/build.cc @@ -89,6 +89,7 @@ void Plan::Reset() { } bool Plan::AddTarget(const Node* target, string* err) { + targets_.push_back(target); return AddSubTarget(target, NULL, err, NULL); } @@ -151,9 +152,20 @@ void Plan::EdgeWanted(const Edge* edge) { Edge* Plan::FindWork() { if (ready_.empty()) return NULL; - EdgeSet::iterator e = ready_.begin(); - Edge* edge = *e; - ready_.erase(e); + set::iterator i = ready_.end(); + for (list::iterator it = priority_list_.begin(), + end = priority_list_.end(); + it != end; ++it) { + i = ready_.find(*it); + if (i != ready_.end()) { + priority_list_.erase(it); + break; + } + } + if (i == ready_.end()) + i = ready_.begin(); + Edge* edge = *i; + ready_.erase(i); return edge; } @@ -424,6 +436,175 @@ void Plan::UnmarkDependents(const Node* node, set* dependents) { } } +namespace { +bool EdgeCom(const Edge* lhs, const Edge* rhs) { + // Note: > to sort in decreasing order. 
+ return lhs->critical_time() > rhs->critical_time(); +} +} // namespace + +void Plan::ComputePriorityList(BuildLog* build_log) { + + //testcase have no build_log + if (!build_log) + return; + + METRIC_RECORD("ComputePriorityList"); + set dedup; + vector deduped; + for (vector::iterator it = targets_.begin(); it != targets_.end(); + ++it) { + pair::iterator, bool> insert_result = dedup.insert(*it); + if (!insert_result.second) + continue; + deduped.push_back(*it); + } + targets_.swap(deduped); + + + vector edges; + map num_out_edges; + for (map::iterator it = want_.begin(), end = want_.end(); + it != end; ++it) { + if (it->second != kWantNothing) + continue; + Edge* e = it->first; + edges.push_back(e); + //printf("in\n"); + set ins; // protect against #308; also sanity + for (size_t nit = 0; nit < e->inputs_.size(); ++nit) { + Node* n = e->inputs_[nit]; + if (ins.count(n) == 0) { + //printf("%s %s\n", (*nit)->path().c_str(), e->rule().name().c_str()); + num_out_edges[n]++; + ins.insert(n); + } + } + //printf("\n"); + } + + if (false) { + for (map::iterator it = num_out_edges.begin(), + end = num_out_edges.end(); + it != end; ++it) { + printf("%s %d\n", it->first->path().c_str(), it->second); + } + } + + + + // this is total time if building all edges in serial, so this value is big + // enough to ensure higher priority target initial critical time always bigger + // than lower one + uint64_t total_time = 0; + // Critical path scheduling. + // 0. Assign costs to all edges, using: + // a) The time the edge needed last time, if available. + // b) The average time this edge type needed, if this edge hasn't run before. 
+ // (not implemented .log entries is not grouped by rule type, and even + // similar rule type may not have same name , for example two compile rule + // with different compile flags) + // c) A fixed cost if this type of edge hasn't run before (0 for phony target, + // 1 for others) + // + for (vector::iterator it = edges.begin(), end = edges.end(); it != end; + total_time += (*it)->run_time_ms_, ++it) { + Edge* edge = *it; + if (edge->is_phony()) + continue; + BuildLog::LogEntry* entry = + build_log->LookupByOutput(edge->outputs_[0]->path()); + if (!entry) { + edge->run_time_ms_ = 1; + continue; + } + int duration = entry->end_time - entry->start_time; // XXX: + 1? + edge->run_time_ms_ = duration; + } + + + // Dump graph to stdout for debugging / prototyping. + if (false) { + for (vector::iterator it = edges.begin(), end = edges.end(); + it != end; ++it) { + Edge* edge = *it; + Node* input = edge->inputs_[0]; + Node* output = edge->outputs_[0]; + printf("%s %s %d\n", input->path().c_str(), output->path().c_str(), + edge->run_time_ms_); + } + } + + // 1. Use backflow algorithm to compute critical times for all nodes, starting + // from the destination nodes. use priority_weight = total_time * N as + // initial critical time to makes forward edgs of higher priority always + // get higher critical time value + // XXX: ignores pools + queue edgesQ; + + // Makes sure that each edge is added to the queue just once. This is needed + // for example if a binary is used to generate 50 source files, and all the + // source file cxx lines are added. Without this, the edge generating that + // binary would be added ot the queue 50 times. 
+ set done; + + for (vector::reverse_iterator it = targets_.rbegin(), + end = targets_.rend(); + it != end; ++it) { + if (Edge* in = (*it)->in_edge()) { + uint64_t priority_weight = (it - targets_.rbegin()) * total_time; + in->set_critical_time( + max(max(in->run_time_ms_, priority_weight), + in->critical_time())); + if (done.count(in) == 0) { + edgesQ.push(in); + done.insert(in); + } + } + } + while (!edgesQ.empty()) { + Edge* e = edgesQ.front(); edgesQ.pop(); + bool all_nodes_ready = true; + uint64_t max_crit = 0; + for (vector::iterator it = e->outputs_.begin(), + end = e->outputs_.end(); + it != end; ++it) { + if (num_out_edges[*it] > 0) { + all_nodes_ready = false; + continue; + } + for (vector::const_iterator eit = (*it)->out_edges().begin(), + eend = (*it)->out_edges().end(); + eit != eend; ++eit) { + max_crit = max((*eit)->critical_time(), max_crit); + } + } + if (!all_nodes_ready) { + // To the back it goes. + // XXX: think about how often this can happen. + edgesQ.push(e); + continue; + } + e->set_critical_time(max(max_crit + e->run_time_ms_, e->critical_time())); + + for (vector::iterator it = e->inputs_.begin(), + end = e->inputs_.end(); + it != end; ++it) { + num_out_edges[*it]--; + if (Edge* in = (*it)->in_edge()) { + if (done.count(in) == 0) { + edgesQ.push(in); + done.insert(in); + } + } + } + } + + // 2. Build priority list in decreasing order of critical times. 
+ sort(edges.begin(), edges.end(), EdgeCom); + priority_list_ = list(edges.begin(), edges.end()); +} + void Plan::Dump() const { printf("pending: %d\n", (int)want_.size()); for (map::const_iterator e = want_.begin(); e != want_.end(); ++e) { @@ -574,6 +755,8 @@ bool Builder::AlreadyUpToDate() const { bool Builder::Build(string* err) { assert(!AlreadyUpToDate()); + plan_.ComputePriorityList(scan_.build_log()); + status_->PlanHasTotalEdges(plan_.command_edge_count()); int pending_commands = 0; int failures_allowed = config_.failures_allowed; diff --git a/src/build.h b/src/build.h index d697dfb89e..11c87da204 100644 --- a/src/build.h +++ b/src/build.h @@ -16,6 +16,7 @@ #define NINJA_BUILD_H_ #include +#include #include #include #include @@ -75,6 +76,7 @@ struct Plan { /// Reset state. Clears want and ready sets. void Reset(); + void ComputePriorityList(BuildLog* build_log); /// Update the build plan to account for modifications made to the graph /// by information loaded from a dyndep file. @@ -122,6 +124,9 @@ struct Plan { EdgeSet ready_; Builder* builder_; + /// user provided targets in build order, earlier one have higher priority + vector targets_; + list priority_list_; /// Total number of edges that have commands (not phony). int command_edges_; diff --git a/src/graph.h b/src/graph.h index bb4f10c479..47a2e57602 100644 --- a/src/graph.h +++ b/src/graph.h @@ -146,9 +146,10 @@ struct Edge { Edge() : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), - id_(0), outputs_ready_(false), deps_loaded_(false), - deps_missing_(false), generated_by_dep_loader_(false), - implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {} + id_(0), run_time_ms_(0), critical_time_(0), outputs_ready_(false), + deps_loaded_(false), deps_missing_(false), + generated_by_dep_loader_(false), implicit_deps_(0), + order_only_deps_(0), implicit_outs_(0) {} /// Return true if all inputs' in-edges are ready. 
bool AllInputsReady() const; @@ -171,6 +172,9 @@ struct Edge { void Dump(const char* prefix="") const; + uint64_t critical_time() const { return critical_time_; } + void set_critical_time(uint64_t critical_time) { critical_time_ = critical_time; } + const Rule* rule_; Pool* pool_; std::vector inputs_; @@ -179,6 +183,8 @@ struct Edge { BindingEnv* env_; VisitMark mark_; size_t id_; + int run_time_ms_; + uint64_t critical_time_; bool outputs_ready_; bool deps_loaded_; bool deps_missing_; From 12b5b7cd535f218895433c4ba10f6556bd44ac42 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Wed, 25 Aug 2021 12:11:04 +0100 Subject: [PATCH 004/127] Use explicit std:: style and remove debug print statements --- src/build.cc | 82 ++++++++++++++++++---------------------------------- src/build.h | 4 +-- 2 files changed, 30 insertions(+), 56 deletions(-) diff --git a/src/build.cc b/src/build.cc index 8f23e9632c..5b6ad2f0de 100644 --- a/src/build.cc +++ b/src/build.cc @@ -152,9 +152,9 @@ void Plan::EdgeWanted(const Edge* edge) { Edge* Plan::FindWork() { if (ready_.empty()) return NULL; - set::iterator i = ready_.end(); - for (list::iterator it = priority_list_.begin(), - end = priority_list_.end(); + std::set::iterator i = ready_.end(); + for (std::list::iterator it = priority_list_.begin(), + end = priority_list_.end(); it != end; ++it) { i = ready_.find(*it); if (i != ready_.end()) { @@ -450,11 +450,11 @@ void Plan::ComputePriorityList(BuildLog* build_log) { return; METRIC_RECORD("ComputePriorityList"); - set dedup; - vector deduped; - for (vector::iterator it = targets_.begin(); it != targets_.end(); + std::set dedup; + std::vector deduped; + for (std::vector::iterator it = targets_.begin(); it != targets_.end(); ++it) { - pair::iterator, bool> insert_result = dedup.insert(*it); + std::pair::iterator, bool> insert_result = dedup.insert(*it); if (!insert_result.second) continue; deduped.push_back(*it); @@ -462,37 +462,24 @@ void Plan::ComputePriorityList(BuildLog* build_log) { 
targets_.swap(deduped); - vector edges; - map num_out_edges; - for (map::iterator it = want_.begin(), end = want_.end(); + std::vector edges; + std::map num_out_edges; + for (std::map::iterator it = want_.begin(), end = want_.end(); it != end; ++it) { if (it->second != kWantNothing) continue; Edge* e = it->first; edges.push_back(e); - //printf("in\n"); - set ins; // protect against #308; also sanity + std::set ins; // protect against #308; also sanity for (size_t nit = 0; nit < e->inputs_.size(); ++nit) { Node* n = e->inputs_[nit]; if (ins.count(n) == 0) { - //printf("%s %s\n", (*nit)->path().c_str(), e->rule().name().c_str()); num_out_edges[n]++; ins.insert(n); } } - //printf("\n"); - } - - if (false) { - for (map::iterator it = num_out_edges.begin(), - end = num_out_edges.end(); - it != end; ++it) { - printf("%s %d\n", it->first->path().c_str(), it->second); - } } - - // this is total time if building all edges in serial, so this value is big // enough to ensure higher priority target initial critical time always bigger // than lower one @@ -507,7 +494,7 @@ void Plan::ComputePriorityList(BuildLog* build_log) { // c) A fixed cost if this type of edge hasn't run before (0 for phony target, // 1 for others) // - for (vector::iterator it = edges.begin(), end = edges.end(); it != end; + for (std::vector::iterator it = edges.begin(), end = edges.end(); it != end; total_time += (*it)->run_time_ms_, ++it) { Edge* edge = *it; if (edge->is_phony()) @@ -518,43 +505,30 @@ void Plan::ComputePriorityList(BuildLog* build_log) { edge->run_time_ms_ = 1; continue; } - int duration = entry->end_time - entry->start_time; // XXX: + 1? + int duration = entry->end_time - entry->start_time; edge->run_time_ms_ = duration; } - - // Dump graph to stdout for debugging / prototyping. 
- if (false) { - for (vector::iterator it = edges.begin(), end = edges.end(); - it != end; ++it) { - Edge* edge = *it; - Node* input = edge->inputs_[0]; - Node* output = edge->outputs_[0]; - printf("%s %s %d\n", input->path().c_str(), output->path().c_str(), - edge->run_time_ms_); - } - } - // 1. Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. use priority_weight = total_time * N as // initial critical time to makes forward edgs of higher priority always // get higher critical time value // XXX: ignores pools - queue edgesQ; + std::queue edgesQ; // Makes sure that each edge is added to the queue just once. This is needed // for example if a binary is used to generate 50 source files, and all the // source file cxx lines are added. Without this, the edge generating that // binary would be added ot the queue 50 times. - set done; + std::set done; - for (vector::reverse_iterator it = targets_.rbegin(), - end = targets_.rend(); + for (std::vector::reverse_iterator it = targets_.rbegin(), + end = targets_.rend(); it != end; ++it) { if (Edge* in = (*it)->in_edge()) { uint64_t priority_weight = (it - targets_.rbegin()) * total_time; in->set_critical_time( - max(max(in->run_time_ms_, priority_weight), + std::max(std::max(in->run_time_ms_, priority_weight), in->critical_time())); if (done.count(in) == 0) { edgesQ.push(in); @@ -566,17 +540,17 @@ void Plan::ComputePriorityList(BuildLog* build_log) { Edge* e = edgesQ.front(); edgesQ.pop(); bool all_nodes_ready = true; uint64_t max_crit = 0; - for (vector::iterator it = e->outputs_.begin(), - end = e->outputs_.end(); + for (std::vector::iterator it = e->outputs_.begin(), + end = e->outputs_.end(); it != end; ++it) { if (num_out_edges[*it] > 0) { all_nodes_ready = false; continue; } - for (vector::const_iterator eit = (*it)->out_edges().begin(), - eend = (*it)->out_edges().end(); + for (std::vector::const_iterator eit = (*it)->out_edges().begin(), + eend = 
(*it)->out_edges().end(); eit != eend; ++eit) { - max_crit = max((*eit)->critical_time(), max_crit); + max_crit = std::max((*eit)->critical_time(), max_crit); } } if (!all_nodes_ready) { @@ -585,10 +559,10 @@ void Plan::ComputePriorityList(BuildLog* build_log) { edgesQ.push(e); continue; } - e->set_critical_time(max(max_crit + e->run_time_ms_, e->critical_time())); + e->set_critical_time(std::max(max_crit + e->run_time_ms_, e->critical_time())); - for (vector::iterator it = e->inputs_.begin(), - end = e->inputs_.end(); + for (std::vector::iterator it = e->inputs_.begin(), + end = e->inputs_.end(); it != end; ++it) { num_out_edges[*it]--; if (Edge* in = (*it)->in_edge()) { @@ -601,8 +575,8 @@ void Plan::ComputePriorityList(BuildLog* build_log) { } // 2. Build priority list in decreasing order of critical times. - sort(edges.begin(), edges.end(), EdgeCom); - priority_list_ = list(edges.begin(), edges.end()); + std::sort(edges.begin(), edges.end(), EdgeCom); + priority_list_.assign(edges.begin(), edges.end()); } void Plan::Dump() const { diff --git a/src/build.h b/src/build.h index 11c87da204..5ee56503d4 100644 --- a/src/build.h +++ b/src/build.h @@ -125,8 +125,8 @@ struct Plan { Builder* builder_; /// user provided targets in build order, earlier one have higher priority - vector targets_; - list priority_list_; + std::vector targets_; + std::list priority_list_; /// Total number of edges that have commands (not phony). 
int command_edges_; From 8e232000f07d7a9c50bb42acb50b9647aba7f80d Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Wed, 25 Aug 2021 15:49:03 +0100 Subject: [PATCH 005/127] Change priority_list_ into a std::priority_queue of ready edges --- src/build.cc | 82 +++++++++++++++++++++++++--------------------------- src/build.h | 57 +++++++++++++++++++++++++++++++++--- 2 files changed, 92 insertions(+), 47 deletions(-) diff --git a/src/build.cc b/src/build.cc index 5b6ad2f0de..76df857810 100644 --- a/src/build.cc +++ b/src/build.cc @@ -75,6 +75,16 @@ bool DryRunCommandRunner::WaitForCommand(Result* result) { } // namespace + +bool EdgeQueue::EdgePriorityCompare::operator()(const Edge* e1, const Edge* e2) const { + const uint64_t ct1 = e1->critical_time(); + const uint64_t ct2 = e2->critical_time(); + if (ct1 != ct2) { + return ct1 < ct2; + } + return e1->id_ < e2->id_; +} + Plan::Plan(Builder* builder) : builder_(builder) , command_edges_(0) @@ -152,21 +162,7 @@ void Plan::EdgeWanted(const Edge* edge) { Edge* Plan::FindWork() { if (ready_.empty()) return NULL; - std::set::iterator i = ready_.end(); - for (std::list::iterator it = priority_list_.begin(), - end = priority_list_.end(); - it != end; ++it) { - i = ready_.find(*it); - if (i != ready_.end()) { - priority_list_.erase(it); - break; - } - } - if (i == ready_.end()) - i = ready_.begin(); - Edge* edge = *i; - ready_.erase(i); - return edge; + return ready_.pop(); } void Plan::ScheduleWork(map::iterator want_e) { @@ -184,10 +180,12 @@ void Plan::ScheduleWork(map::iterator want_e) { Pool* pool = edge->pool(); if (pool->ShouldDelayEdge()) { pool->DelayEdge(edge); - pool->RetrieveReadyEdges(&ready_); + EdgeSet new_edges; + pool->RetrieveReadyEdges(&new_edges); + ready_.push(new_edges.begin(), new_edges.end()); } else { pool->EdgeScheduled(*edge); - ready_.insert(edge); + ready_.push(edge); } } @@ -199,7 +197,9 @@ bool Plan::EdgeFinished(Edge* edge, EdgeResult result, string* err) { // See if this job frees up any delayed 
jobs. if (directly_wanted) edge->pool()->EdgeFinished(*edge); - edge->pool()->RetrieveReadyEdges(&ready_); + EdgeSet new_edges; + edge->pool()->RetrieveReadyEdges(&new_edges); + ready_.push(new_edges.begin(), new_edges.end()); // The rest of this function only applies to successful commands. if (result != kEdgeSucceeded) @@ -437,29 +437,32 @@ void Plan::UnmarkDependents(const Node* node, set* dependents) { } namespace { -bool EdgeCom(const Edge* lhs, const Edge* rhs) { - // Note: > to sort in decreasing order. - return lhs->critical_time() > rhs->critical_time(); -} + +struct SeenNodeBefore { + std::set *seen; + + bool operator() (const Node* node) { + // Return true if the node has been seen before + return !seen->insert(node).second; + } +}; + } // namespace -void Plan::ComputePriorityList(BuildLog* build_log) { +void Plan::ComputeCriticalTime(BuildLog* build_log) { //testcase have no build_log if (!build_log) return; METRIC_RECORD("ComputePriorityList"); - std::set dedup; - std::vector deduped; - for (std::vector::iterator it = targets_.begin(); it != targets_.end(); - ++it) { - std::pair::iterator, bool> insert_result = dedup.insert(*it); - if (!insert_result.second) - continue; - deduped.push_back(*it); + // Remove duplicate targets + { + std::set seen; + targets_.erase( + std::remove_if(targets_.begin(), targets_.end(), SeenNodeBefore{&seen}), + targets_.end()); } - targets_.swap(deduped); std::vector edges; @@ -473,9 +476,8 @@ void Plan::ComputePriorityList(BuildLog* build_log) { std::set ins; // protect against #308; also sanity for (size_t nit = 0; nit < e->inputs_.size(); ++nit) { Node* n = e->inputs_[nit]; - if (ins.count(n) == 0) { + if (ins.insert(n).second) { num_out_edges[n]++; - ins.insert(n); } } } @@ -509,7 +511,7 @@ void Plan::ComputePriorityList(BuildLog* build_log) { edge->run_time_ms_ = duration; } - // 1. 
Use backflow algorithm to compute critical times for all nodes, starting + // Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. use priority_weight = total_time * N as // initial critical time to makes forward edgs of higher priority always // get higher critical time value @@ -530,9 +532,8 @@ void Plan::ComputePriorityList(BuildLog* build_log) { in->set_critical_time( std::max(std::max(in->run_time_ms_, priority_weight), in->critical_time())); - if (done.count(in) == 0) { + if (done.insert(in).second) { edgesQ.push(in); - done.insert(in); } } } @@ -566,17 +567,12 @@ void Plan::ComputePriorityList(BuildLog* build_log) { it != end; ++it) { num_out_edges[*it]--; if (Edge* in = (*it)->in_edge()) { - if (done.count(in) == 0) { + if (done.insert(in).second) { edgesQ.push(in); - done.insert(in); } } } } - - // 2. Build priority list in decreasing order of critical times. - std::sort(edges.begin(), edges.end(), EdgeCom); - priority_list_.assign(edges.begin(), edges.end()); } void Plan::Dump() const { diff --git a/src/build.h b/src/build.h index 5ee56503d4..ec6deea2e1 100644 --- a/src/build.h +++ b/src/build.h @@ -16,7 +16,7 @@ #define NINJA_BUILD_H_ #include -#include +#include #include #include #include @@ -36,6 +36,56 @@ struct Node; struct State; struct Status; + +// Set of ready edges, sorted by priority +class EdgeQueue { + struct EdgePriorityCompare { + bool operator()(const Edge* e1, const Edge* e2) const; + }; + + std::priority_queue, EdgePriorityCompare> queue_; + // Set to ensure no duplicate entries in ready_ + EdgeSet set_; + +public: + + void push(Edge* edge) { + if (set_.insert(edge).second) { + queue_.push(edge); + } + } + + template + void push(It first, It last) { + for (; first != last; ++first) { + push(*first); + } + } + + Edge* pop() { + Edge* ret = queue_.top(); + queue_.pop(); + set_.erase(ret); + return ret; + } + + void clear() { + set_.clear(); + while (!queue_.empty()) { + queue_.pop(); + } + 
} + + size_t size() const { + return queue_.size(); + } + + bool empty() const { + return queue_.empty(); + } +}; + + /// Plan stores the state of a build plan: what we intend to build, /// which steps we're ready to execute. struct Plan { @@ -76,7 +126,7 @@ struct Plan { /// Reset state. Clears want and ready sets. void Reset(); - void ComputePriorityList(BuildLog* build_log); + void ComputeCriticalTime(BuildLog* build_log); /// Update the build plan to account for modifications made to the graph /// by information loaded from a dyndep file. @@ -121,12 +171,11 @@ struct Plan { /// we want for the edge. std::map want_; - EdgeSet ready_; + EdgeQueue ready_; Builder* builder_; /// user provided targets in build order, earlier one have higher priority std::vector targets_; - std::list priority_list_; /// Total number of edges that have commands (not phony). int command_edges_; From 2fcf403ac54d77d7dc8d582c28aa86a911428da4 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Wed, 25 Aug 2021 16:18:40 +0100 Subject: [PATCH 006/127] Fix critical time calculation The existing algorithm doesn't work because it strictly requires that all outputs are visited before updating an edge. So any task downstream from a task with multiple out-edges may get ignored. The fix is to always propagate your critical time to the next input node, and only place it in the queue if you offer a higher critical time. 
--- src/build.cc | 97 +++++++++++++++++----------------------------------- src/graph.h | 8 ++--- 2 files changed, 35 insertions(+), 70 deletions(-) diff --git a/src/build.cc b/src/build.cc index 76df857810..a4dfc01b4f 100644 --- a/src/build.cc +++ b/src/build.cc @@ -77,8 +77,8 @@ bool DryRunCommandRunner::WaitForCommand(Result* result) { bool EdgeQueue::EdgePriorityCompare::operator()(const Edge* e1, const Edge* e2) const { - const uint64_t ct1 = e1->critical_time(); - const uint64_t ct2 = e2->critical_time(); + const int64_t ct1 = e1->critical_time(); + const int64_t ct2 = e2->critical_time(); if (ct1 != ct2) { return ct1 < ct2; } @@ -464,28 +464,10 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { targets_.end()); } - - std::vector edges; - std::map num_out_edges; - for (std::map::iterator it = want_.begin(), end = want_.end(); - it != end; ++it) { - if (it->second != kWantNothing) - continue; - Edge* e = it->first; - edges.push_back(e); - std::set ins; // protect against #308; also sanity - for (size_t nit = 0; nit < e->inputs_.size(); ++nit) { - Node* n = e->inputs_[nit]; - if (ins.insert(n).second) { - num_out_edges[n]++; - } - } - } - // this is total time if building all edges in serial, so this value is big // enough to ensure higher priority target initial critical time always bigger // than lower one - uint64_t total_time = 0; + int64_t total_time = 0; // Critical path scheduling. // 0. Assign costs to all edges, using: // a) The time the edge needed last time, if available. 
@@ -496,11 +478,12 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // c) A fixed cost if this type of edge hasn't run before (0 for phony target, // 1 for others) // - for (std::vector::iterator it = edges.begin(), end = edges.end(); it != end; - total_time += (*it)->run_time_ms_, ++it) { - Edge* edge = *it; - if (edge->is_phony()) + for (std::map::iterator it = want_.begin(), end = want_.end(); + it != end; ++it) { + Edge* edge = it->first; + if (edge->is_phony()) { continue; + } BuildLog::LogEntry* entry = build_log->LookupByOutput(edge->outputs_[0]->path()); if (!entry) { @@ -512,62 +495,44 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { } // Use backflow algorithm to compute critical times for all nodes, starting - // from the destination nodes. use priority_weight = total_time * N as - // initial critical time to makes forward edgs of higher priority always - // get higher critical time value + // from the destination nodes. // XXX: ignores pools - std::queue edgesQ; - - // Makes sure that each edge is added to the queue just once. This is needed - // for example if a binary is used to generate 50 source files, and all the - // source file cxx lines are added. Without this, the edge generating that - // binary would be added ot the queue 50 times. - std::set done; + std::set active_edges; // All edges in edgesQ (for uniqueness) + std::queue edgesQ; // Queue, for breadth-first traversal for (std::vector::reverse_iterator it = targets_.rbegin(), end = targets_.rend(); it != end; ++it) { if (Edge* in = (*it)->in_edge()) { - uint64_t priority_weight = (it - targets_.rbegin()) * total_time; + // Use initial critical time: total_time * N. 
This means higher + // priority targets always get a higher critical time value + int64_t priority_weight = (it - targets_.rbegin()) * total_time; in->set_critical_time( - std::max(std::max(in->run_time_ms_, priority_weight), - in->critical_time())); - if (done.insert(in).second) { + priority_weight + + std::max(in->run_time_ms_, in->critical_time())); + if (active_edges.insert(in).second) { edgesQ.push(in); } } } + while (!edgesQ.empty()) { - Edge* e = edgesQ.front(); edgesQ.pop(); - bool all_nodes_ready = true; - uint64_t max_crit = 0; - for (std::vector::iterator it = e->outputs_.begin(), - end = e->outputs_.end(); - it != end; ++it) { - if (num_out_edges[*it] > 0) { - all_nodes_ready = false; - continue; - } - for (std::vector::const_iterator eit = (*it)->out_edges().begin(), - eend = (*it)->out_edges().end(); - eit != eend; ++eit) { - max_crit = std::max((*eit)->critical_time(), max_crit); - } - } - if (!all_nodes_ready) { - // To the back it goes. - // XXX: think about how often this can happen. 
- edgesQ.push(e); - continue; - } - e->set_critical_time(std::max(max_crit + e->run_time_ms_, e->critical_time())); + Edge* e = edgesQ.front(); + edgesQ.pop(); + active_edges.erase(e); for (std::vector::iterator it = e->inputs_.begin(), end = e->inputs_.end(); it != end; ++it) { - num_out_edges[*it]--; - if (Edge* in = (*it)->in_edge()) { - if (done.insert(in).second) { + Edge* in = (*it)->in_edge(); + if (!in) { + continue; + } + // Only process edge if this node offers a higher critical time + const int64_t proposed_time = e->critical_time() + in->run_time_ms_; + if (proposed_time > in->critical_time()) { + in->set_critical_time(proposed_time); + if (active_edges.insert(in).second) { edgesQ.push(in); } } @@ -725,7 +690,7 @@ bool Builder::AlreadyUpToDate() const { bool Builder::Build(string* err) { assert(!AlreadyUpToDate()); - plan_.ComputePriorityList(scan_.build_log()); + plan_.ComputeCriticalTime(scan_.build_log()); status_->PlanHasTotalEdges(plan_.command_edge_count()); int pending_commands = 0; diff --git a/src/graph.h b/src/graph.h index 47a2e57602..1bc79fac4e 100644 --- a/src/graph.h +++ b/src/graph.h @@ -146,7 +146,7 @@ struct Edge { Edge() : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), - id_(0), run_time_ms_(0), critical_time_(0), outputs_ready_(false), + id_(0), run_time_ms_(0), critical_time_(-1), outputs_ready_(false), deps_loaded_(false), deps_missing_(false), generated_by_dep_loader_(false), implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {} @@ -172,8 +172,8 @@ struct Edge { void Dump(const char* prefix="") const; - uint64_t critical_time() const { return critical_time_; } - void set_critical_time(uint64_t critical_time) { critical_time_ = critical_time; } + int64_t critical_time() const { return critical_time_; } + void set_critical_time(int64_t critical_time) { critical_time_ = critical_time; } const Rule* rule_; Pool* pool_; @@ -184,7 +184,7 @@ struct Edge { VisitMark mark_; size_t id_; int run_time_ms_; - 
uint64_t critical_time_; + int64_t critical_time_; bool outputs_ready_; bool deps_loaded_; bool deps_missing_; From c5d355cbb766a6bbce517dbd914dc671cea526b3 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Wed, 25 Aug 2021 20:53:28 +0100 Subject: [PATCH 007/127] clang-format diff --- src/build.cc | 9 ++++----- src/build.h | 12 +++--------- src/graph.h | 8 +++++--- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/src/build.cc b/src/build.cc index a4dfc01b4f..8b638b7398 100644 --- a/src/build.cc +++ b/src/build.cc @@ -450,8 +450,7 @@ struct SeenNodeBefore { } // namespace void Plan::ComputeCriticalTime(BuildLog* build_log) { - - //testcase have no build_log + // testcases have no build_log if (!build_log) return; @@ -459,9 +458,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // Remove duplicate targets { std::set seen; - targets_.erase( - std::remove_if(targets_.begin(), targets_.end(), SeenNodeBefore{&seen}), - targets_.end()); + targets_.erase(std::remove_if(targets_.begin(), targets_.end(), + SeenNodeBefore{ &seen }), + targets_.end()); } // this is total time if building all edges in serial, so this value is big diff --git a/src/build.h b/src/build.h index ec6deea2e1..410c2db753 100644 --- a/src/build.h +++ b/src/build.h @@ -47,8 +47,7 @@ class EdgeQueue { // Set to ensure no duplicate entries in ready_ EdgeSet set_; -public: - + public: void push(Edge* edge) { if (set_.insert(edge).second) { queue_.push(edge); @@ -76,16 +75,11 @@ class EdgeQueue { } } - size_t size() const { - return queue_.size(); - } + size_t size() const { return queue_.size(); } - bool empty() const { - return queue_.empty(); - } + bool empty() const { return queue_.empty(); } }; - /// Plan stores the state of a build plan: what we intend to build, /// which steps we're ready to execute. 
struct Plan { diff --git a/src/graph.h b/src/graph.h index 1bc79fac4e..b66ae6bcee 100644 --- a/src/graph.h +++ b/src/graph.h @@ -148,8 +148,8 @@ struct Edge { : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), id_(0), run_time_ms_(0), critical_time_(-1), outputs_ready_(false), deps_loaded_(false), deps_missing_(false), - generated_by_dep_loader_(false), implicit_deps_(0), - order_only_deps_(0), implicit_outs_(0) {} + generated_by_dep_loader_(false), implicit_deps_(0), order_only_deps_(0), + implicit_outs_(0) {} /// Return true if all inputs' in-edges are ready. bool AllInputsReady() const; @@ -173,7 +173,9 @@ struct Edge { void Dump(const char* prefix="") const; int64_t critical_time() const { return critical_time_; } - void set_critical_time(int64_t critical_time) { critical_time_ = critical_time; } + void set_critical_time(int64_t critical_time) { + critical_time_ = critical_time; + } const Rule* rule_; Pool* pool_; From 63b0a9a6a170793f98aa22da827ee3ac11eb1a50 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Thu, 26 Aug 2021 00:53:54 +0100 Subject: [PATCH 008/127] Address review comments --- src/build.cc | 40 ++++++++++++++++++++++------------------ src/build.h | 12 ++---------- src/graph.h | 2 +- 3 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/build.cc b/src/build.cc index 8b638b7398..fd220e5a23 100644 --- a/src/build.cc +++ b/src/build.cc @@ -438,12 +438,15 @@ void Plan::UnmarkDependents(const Node* node, set* dependents) { namespace { -struct SeenNodeBefore { - std::set *seen; +template +struct SeenBefore { + std::set *seen_; - bool operator() (const Node* node) { - // Return true if the node has been seen before - return !seen->insert(node).second; + SeenBefore(std::set* seen) : seen_(seen) {} + + bool operator() (const T* item) { + // Return true if the item has been seen before + return !seen_->insert(item).second; } }; @@ -458,8 +461,8 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // Remove duplicate 
targets { std::set seen; - targets_.erase(std::remove_if(targets_.begin(), targets_.end(), - SeenNodeBefore{ &seen }), + SeenBefore seen_before(&seen); + targets_.erase(std::remove_if(targets_.begin(), targets_.end(), seen_before), targets_.end()); } @@ -489,15 +492,16 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { edge->run_time_ms_ = 1; continue; } - int duration = entry->end_time - entry->start_time; - edge->run_time_ms_ = duration; + edge->run_time_ms_ = entry->end_time - entry->start_time; } // Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. // XXX: ignores pools - std::set active_edges; // All edges in edgesQ (for uniqueness) - std::queue edgesQ; // Queue, for breadth-first traversal + std::queue breadthFirstEdges; // Queue, for breadth-first traversal + std::set active_edges; // Set of in breadthFirstEdges + SeenBefore seen_edge( + &active_edges); // Test for uniqueness in breadthFirstEdges for (std::vector::reverse_iterator it = targets_.rbegin(), end = targets_.rend(); @@ -509,15 +513,15 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { in->set_critical_time( priority_weight + std::max(in->run_time_ms_, in->critical_time())); - if (active_edges.insert(in).second) { - edgesQ.push(in); + if (!seen_edge(in)) { + breadthFirstEdges.push(in); } } } - while (!edgesQ.empty()) { - Edge* e = edgesQ.front(); - edgesQ.pop(); + while (!breadthFirstEdges.empty()) { + Edge* e = breadthFirstEdges.front(); + breadthFirstEdges.pop(); active_edges.erase(e); for (std::vector::iterator it = e->inputs_.begin(), @@ -531,8 +535,8 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { const int64_t proposed_time = e->critical_time() + in->run_time_ms_; if (proposed_time > in->critical_time()) { in->set_critical_time(proposed_time); - if (active_edges.insert(in).second) { - edgesQ.push(in); + if (!seen_edge(in)) { + breadthFirstEdges.push(in); } } } diff --git a/src/build.h b/src/build.h index 
410c2db753..4e36e16956 100644 --- a/src/build.h +++ b/src/build.h @@ -44,14 +44,10 @@ class EdgeQueue { }; std::priority_queue, EdgePriorityCompare> queue_; - // Set to ensure no duplicate entries in ready_ - EdgeSet set_; public: void push(Edge* edge) { - if (set_.insert(edge).second) { - queue_.push(edge); - } + queue_.push(edge); } template @@ -64,15 +60,11 @@ class EdgeQueue { Edge* pop() { Edge* ret = queue_.top(); queue_.pop(); - set_.erase(ret); return ret; } void clear() { - set_.clear(); - while (!queue_.empty()) { - queue_.pop(); - } + queue_ = std::priority_queue, EdgePriorityCompare>(); } size_t size() const { return queue_.size(); } diff --git a/src/graph.h b/src/graph.h index b66ae6bcee..7851504ff6 100644 --- a/src/graph.h +++ b/src/graph.h @@ -185,7 +185,7 @@ struct Edge { BindingEnv* env_; VisitMark mark_; size_t id_; - int run_time_ms_; + int64_t run_time_ms_; int64_t critical_time_; bool outputs_ready_; bool deps_loaded_; From 5b8d19b24b00973976990303524f47750e3e1dc4 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Thu, 26 Aug 2021 00:58:31 +0100 Subject: [PATCH 009/127] Fix total_time computation --- src/build.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/build.cc b/src/build.cc index fd220e5a23..08b0cc689e 100644 --- a/src/build.cc +++ b/src/build.cc @@ -492,7 +492,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { edge->run_time_ms_ = 1; continue; } - edge->run_time_ms_ = entry->end_time - entry->start_time; + const int64_t duration = entry->end_time - entry->start_time; + edge->run_time_ms_ = duration; + total_time += duration; } // Use backflow algorithm to compute critical times for all nodes, starting From fe80637327997d0a97e1eee45e3d0929edc75bbd Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Fri, 27 Aug 2021 12:12:45 +0100 Subject: [PATCH 010/127] Address review comments --- src/build.cc | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/build.cc b/src/build.cc index 
08b0cc689e..99e6efa7a5 100644 --- a/src/build.cc +++ b/src/build.cc @@ -440,7 +440,7 @@ namespace { template struct SeenBefore { - std::set *seen_; + std::set* seen_; SeenBefore(std::set* seen) : seen_(seen) {} @@ -473,13 +473,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // Critical path scheduling. // 0. Assign costs to all edges, using: // a) The time the edge needed last time, if available. - // b) The average time this edge type needed, if this edge hasn't run before. - // (not implemented .log entries is not grouped by rule type, and even - // similar rule type may not have same name , for example two compile rule - // with different compile flags) - // c) A fixed cost if this type of edge hasn't run before (0 for phony target, - // 1 for others) - // + // b) A fixed cost if this type of edge hasn't run before (0 for + // phony target, 1 for others) + // TODO: Find a better heuristic for edges without log entries for (std::map::iterator it = want_.begin(), end = want_.end(); it != end; ++it) { Edge* edge = it->first; @@ -490,6 +486,7 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { build_log->LookupByOutput(edge->outputs_[0]->path()); if (!entry) { edge->run_time_ms_ = 1; + total_time += 1; continue; } const int64_t duration = entry->end_time - entry->start_time; From c83167fb6e5d862fe6389e3996f2a293e2e6554c Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Fri, 27 Aug 2021 17:15:27 +0100 Subject: [PATCH 011/127] Improve heuristic for unknown cost edges --- src/build.cc | 92 +++++++++++++++++++++++++++++++++++++--------------- src/build.h | 23 ++++++------- 2 files changed, 77 insertions(+), 38 deletions(-) diff --git a/src/build.cc b/src/build.cc index 99e6efa7a5..6f9cf27dcd 100644 --- a/src/build.cc +++ b/src/build.cc @@ -450,6 +450,67 @@ struct SeenBefore { } }; +// Assign run_time_ms_ for all wanted edges, and returns total time for all edges +// For phony edges, 0 cost. +// For edges with a build history, use the last build time. 
+// For edges without history, use the 75th percentile time for edges with history. +// Or, if there is no history at all just use 1 +int64_t AssignEdgeRuntime(BuildLog* build_log, + const std::map& want) { + bool missing_durations = false; + std::vector durations; + int64_t total_time = 0; + + for (std::map::const_iterator it = want.begin(), + end = want.end(); + it != end; ++it) { + Edge* edge = it->first; + if (edge->is_phony()) { + continue; + } + BuildLog::LogEntry* entry = + build_log->LookupByOutput(edge->outputs_[0]->path()); + if (!entry) { + missing_durations = true; + edge->run_time_ms_ = -1; // -1 to mark as needing filled in + continue; + } + const int64_t duration = entry->end_time - entry->start_time; + edge->run_time_ms_ = duration; + total_time += duration; + durations.push_back(duration); + } + + if (!missing_durations) { + return total_time; + } + + // Heuristic: for unknown edges, take the 75th percentile time. + // This allows the known-slowest jobs to run first, but isn't so + // small that it is always the lowest priority. Which for slow jobs, + // might bottleneck the build. 
+ int64_t p75_time = 1; + int64_t num_durations = static_cast(durations.size()); + if (num_durations > 0) { + size_t p75_idx = (num_durations - 1) - num_durations / 4; + std::vector::iterator p75_it = durations.begin() + p75_idx; + std::nth_element(durations.begin(), p75_it, durations.end()); + p75_time = *p75_it; + } + + for (std::map::const_iterator it = want.begin(), + end = want.end(); + it != end; ++it) { + Edge* edge = it->first; + if (edge->run_time_ms_ >= 0) { + continue; + } + edge->run_time_ms_ = p75_time; + total_time += p75_time; + } + return total_time; +} + } // namespace void Plan::ComputeCriticalTime(BuildLog* build_log) { @@ -466,33 +527,10 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { targets_.end()); } - // this is total time if building all edges in serial, so this value is big - // enough to ensure higher priority target initial critical time always bigger - // than lower one - int64_t total_time = 0; - // Critical path scheduling. - // 0. Assign costs to all edges, using: - // a) The time the edge needed last time, if available. - // b) A fixed cost if this type of edge hasn't run before (0 for - // phony target, 1 for others) - // TODO: Find a better heuristic for edges without log entries - for (std::map::iterator it = want_.begin(), end = want_.end(); - it != end; ++it) { - Edge* edge = it->first; - if (edge->is_phony()) { - continue; - } - BuildLog::LogEntry* entry = - build_log->LookupByOutput(edge->outputs_[0]->path()); - if (!entry) { - edge->run_time_ms_ = 1; - total_time += 1; - continue; - } - const int64_t duration = entry->end_time - entry->start_time; - edge->run_time_ms_ = duration; - total_time += duration; - } + // total time if building all edges in serial. 
This value is big + // enough to ensure higher priority target's initial critical time + // is always bigger than lower ones + int64_t total_time = AssignEdgeRuntime(build_log, want_); // Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. diff --git a/src/build.h b/src/build.h index 4e36e16956..084607b59b 100644 --- a/src/build.h +++ b/src/build.h @@ -118,17 +118,6 @@ struct Plan { /// by information loaded from a dyndep file. bool DyndepsLoaded(DependencyScan* scan, const Node* node, const DyndepFile& ddf, std::string* err); -private: - bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err); - void UnmarkDependents(const Node* node, std::set* dependents); - bool AddSubTarget(const Node* node, const Node* dependent, std::string* err, - std::set* dyndep_walk); - - /// Update plan with knowledge that the given node is up to date. - /// If the node is a dyndep binding on any of its dependents, this - /// loads dynamic dependencies from the node's path. - /// Returns 'false' if loading dyndep info fails and 'true' otherwise. - bool NodeFinished(Node* node, std::string* err); /// Enumerate possible steps we want for an edge. enum Want @@ -143,6 +132,18 @@ struct Plan { kWantToFinish }; +private: + bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err); + void UnmarkDependents(const Node* node, std::set* dependents); + bool AddSubTarget(const Node* node, const Node* dependent, std::string* err, + std::set* dyndep_walk); + + /// Update plan with knowledge that the given node is up to date. + /// If the node is a dyndep binding on any of its dependents, this + /// loads dynamic dependencies from the node's path. + /// Returns 'false' if loading dyndep info fails and 'true' otherwise. 
+ bool NodeFinished(Node* node, std::string* err); + void EdgeWanted(const Edge* edge); bool EdgeMaybeReady(std::map::iterator want_e, std::string* err); From 72f191288dfe29b70923b41a71bb41f610cb9437 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Fri, 27 Aug 2021 16:38:21 -0700 Subject: [PATCH 012/127] Make ScopedMetric portable, accurate, and efficient ScopedMetric used to use separate code paths for Win32 and other platforms. std::chrono makes it practical to use the same code everywhere. This requires C++ 11 so that is specified in the configuration file (C++ 11 was already used on Win32 builds). This change also makes ScopedMetric more accurate because it postpones converting the raw deltas until it is time to report them. Previously they would be converted at each measurement site, potentially losing almost a microsecond each time. Postponing the conversion also reduces the cost of ScopedMetric. The performance of std::chrono is currently harmed a bit by some design and code-gen issues in VC++, but bugs have been filed and these should be fixed. The overhead is low enough to be inconsequential for the O(15,000) ScopedMetric objects created in a build of Chrome. The net result is that the ScopedMetric code is now portable, more accurate, and as fast or faster than it used to be. This will resolve issue #2004. 
--- CMakeLists.txt | 1 + src/metrics.cc | 66 +++++++++++++++++++++----------------------------- src/metrics.h | 12 ++++----- 3 files changed, 34 insertions(+), 45 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b49c5b01f6..234724df72 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,6 +16,7 @@ else() endif() # --- compiler flags +set(CMAKE_CXX_STANDARD 11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) diff --git a/src/metrics.cc b/src/metrics.cc index dbaf22104c..caad696a9e 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -18,13 +18,8 @@ #include #include -#ifndef _WIN32 -#include -#else -#include -#endif - #include +#include #include "util.h" @@ -34,45 +29,30 @@ Metrics* g_metrics = NULL; namespace { -#ifndef _WIN32 /// Compute a platform-specific high-res timer value that fits into an int64. int64_t HighResTimer() { - timeval tv; - if (gettimeofday(&tv, NULL) < 0) - Fatal("gettimeofday: %s", strerror(errno)); - return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec; + auto now = chrono::steady_clock::now(); + return chrono::duration_cast(now.time_since_epoch()).count(); } -/// Convert a delta of HighResTimer() values to microseconds. -int64_t TimerToMicros(int64_t dt) { - // No conversion necessary. - return dt; -} -#else -int64_t LargeIntegerToInt64(const LARGE_INTEGER& i) { - return ((int64_t)i.HighPart) << 32 | i.LowPart; -} +constexpr int64_t GetFrequency() { + constexpr auto den = std::chrono::steady_clock::period::den; + constexpr auto num = std::chrono::steady_clock::period::num; -int64_t HighResTimer() { - LARGE_INTEGER counter; - if (!QueryPerformanceCounter(&counter)) - Fatal("QueryPerformanceCounter: %s", GetLastErrorString().c_str()); - return LargeIntegerToInt64(counter); + // If numerator isn't 1 then we lose precision and that will need to be assessed. 
+ static_assert(num == 1, "Numerator must be 1"); + return den / num; } int64_t TimerToMicros(int64_t dt) { - static int64_t ticks_per_sec = 0; - if (!ticks_per_sec) { - LARGE_INTEGER freq; - if (!QueryPerformanceFrequency(&freq)) - Fatal("QueryPerformanceFrequency: %s", GetLastErrorString().c_str()); - ticks_per_sec = LargeIntegerToInt64(freq); - } + // dt is in ticks. We want microseconds. + return (dt * 1000000) / GetFrequency(); +} +int64_t TimerToMicros(double dt) { // dt is in ticks. We want microseconds. - return (dt * 1000000) / ticks_per_sec; + return (dt * 1000000) / GetFrequency(); } -#endif } // anonymous namespace @@ -87,7 +67,9 @@ ScopedMetric::~ScopedMetric() { if (!metric_) return; metric_->count++; - int64_t dt = TimerToMicros(HighResTimer() - start_); + // Leave in the timer's natural frequency to avoid paying the conversion cost on + // every measurement. + int64_t dt = HighResTimer() - start_; metric_->sum += dt; } @@ -112,15 +94,21 @@ void Metrics::Report() { for (vector::iterator i = metrics_.begin(); i != metrics_.end(); ++i) { Metric* metric = *i; - double total = metric->sum / (double)1000; - double avg = metric->sum / (double)metric->count; + uint64_t micros = TimerToMicros(metric->sum); + double total = micros / (double)1000; + double avg = micros / (double)metric->count; printf("%-*s\t%-6d\t%-8.1f\t%.1f\n", width, metric->name.c_str(), metric->count, avg, total); } } -uint64_t Stopwatch::Now() const { - return TimerToMicros(HighResTimer()); +double Stopwatch::Elapsed() const { + // Convert to micros after converting to double to minimize error. + return 1e-6 * TimerToMicros(static_cast(NowRaw() - started_)); +} + +uint64_t Stopwatch::NowRaw() const { + return HighResTimer(); } int64_t GetTimeMillis() { diff --git a/src/metrics.h b/src/metrics.h index 11239b5b9f..549e935286 100644 --- a/src/metrics.h +++ b/src/metrics.h @@ -28,7 +28,7 @@ struct Metric { std::string name; /// Number of times we've hit the code path. 
int count; - /// Total time (in micros) we've spent on the code path. + /// Total time (in platform-dependent units) we've spent on the code path. int64_t sum; }; @@ -68,15 +68,15 @@ struct Stopwatch { Stopwatch() : started_(0) {} /// Seconds since Restart() call. - double Elapsed() const { - return 1e-6 * static_cast(Now() - started_); - } + double Elapsed() const; - void Restart() { started_ = Now(); } + void Restart() { started_ = NowRaw(); } private: uint64_t started_; - uint64_t Now() const; + // Return the current time using the native frequency of the high resolution + // timer. + uint64_t NowRaw() const; }; /// The primary interface to metrics. Use METRIC_RECORD("foobar") at the top From 04170ad365a64caba3f10fbc5e63714ff4c4ad5b Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Fri, 27 Aug 2021 17:02:20 -0700 Subject: [PATCH 013/127] Fix constexpr and clang-format --- CONTRIBUTING.md | 2 +- src/metrics.cc | 22 +++++++++++----------- src/metrics.h | 1 - 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index be1fc02779..0e24dfd584 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,7 +15,7 @@ Generally it's the a few additions: * Any code merged into the Ninja codebase which will be part of the main - executable must compile as C++03. You may use C++11 features in a test or an + executable must compile as C++11. You may use C++14 features in a test or an unimportant tool if you guard your code with `#if __cplusplus >= 201103L`. * We have used `using namespace std;` a lot in the past. For new contributions, please try to avoid relying on it and instead whenever possible use `std::`. diff --git a/src/metrics.cc b/src/metrics.cc index caad696a9e..9a4dd12469 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -32,16 +32,18 @@ namespace { /// Compute a platform-specific high-res timer value that fits into an int64. 
int64_t HighResTimer() { auto now = chrono::steady_clock::now(); - return chrono::duration_cast(now.time_since_epoch()).count(); + return chrono::duration_cast( + now.time_since_epoch()) + .count(); } constexpr int64_t GetFrequency() { - constexpr auto den = std::chrono::steady_clock::period::den; - constexpr auto num = std::chrono::steady_clock::period::num; - - // If numerator isn't 1 then we lose precision and that will need to be assessed. - static_assert(num == 1, "Numerator must be 1"); - return den / num; + // If numerator isn't 1 then we lose precision and that will need to be + // assessed. + static_assert(std::chrono::steady_clock::period::num == 1, + "Numerator must be 1"); + return std::chrono::steady_clock::period::den / + std::chrono::steady_clock::period::num; } int64_t TimerToMicros(int64_t dt) { @@ -56,7 +58,6 @@ int64_t TimerToMicros(double dt) { } // anonymous namespace - ScopedMetric::ScopedMetric(Metric* metric) { metric_ = metric; if (!metric_) @@ -67,8 +68,8 @@ ScopedMetric::~ScopedMetric() { if (!metric_) return; metric_->count++; - // Leave in the timer's natural frequency to avoid paying the conversion cost on - // every measurement. + // Leave in the timer's natural frequency to avoid paying the conversion cost + // on every measurement. int64_t dt = HighResTimer() - start_; metric_->sum += dt; } @@ -114,4 +115,3 @@ uint64_t Stopwatch::NowRaw() const { int64_t GetTimeMillis() { return TimerToMicros(HighResTimer()) / 1000; } - diff --git a/src/metrics.h b/src/metrics.h index 549e935286..c9ba2366af 100644 --- a/src/metrics.h +++ b/src/metrics.h @@ -32,7 +32,6 @@ struct Metric { int64_t sum; }; - /// A scoped object for recording a metric across the body of a function. /// Used by the METRIC_RECORD macro. 
struct ScopedMetric { From 1130200c2396c58f67831fccbd4eee4df196f696 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Sat, 28 Aug 2021 10:01:49 -0700 Subject: [PATCH 014/127] Update __cplusplus value --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0e24dfd584..69dac7442e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,7 @@ a few additions: * Any code merged into the Ninja codebase which will be part of the main executable must compile as C++11. You may use C++14 features in a test or an - unimportant tool if you guard your code with `#if __cplusplus >= 201103L`. + unimportant tool if you guard your code with `#if __cplusplus >= 201402L`. * We have used `using namespace std;` a lot in the past. For new contributions, please try to avoid relying on it and instead whenever possible use `std::`. However, please do not change existing code simply to add `std::` unless your From 77448b4fb7dc1e8baad0cc75c4d6d04fabc21def Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Fri, 8 Oct 2021 15:11:49 +0100 Subject: [PATCH 015/127] Remove redundant include --- src/build.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/build.h b/src/build.h index 084607b59b..9b49ffb238 100644 --- a/src/build.h +++ b/src/build.h @@ -16,7 +16,6 @@ #define NINJA_BUILD_H_ #include -#include #include #include #include From d0cc2383dd0da65ff0b0855bc6f2f53251f8cccf Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 16 Nov 2021 14:35:10 +0100 Subject: [PATCH 016/127] GoogleTest 1.11.0 now requires linking pthread on Linux --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 341eb1c52f..f714e9eb15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -198,7 +198,8 @@ if(BUILD_TESTING) if(WIN32) target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc) endif() - target_link_libraries(ninja_test PRIVATE 
libninja libninja-re2c gtest) + find_package(Threads REQUIRED) + target_link_libraries(ninja_test PRIVATE libninja libninja-re2c gtest Threads::Threads) foreach(perftest build_log_perftest From aa2cc3f75fb57705271092f19e53d179fd9640c6 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 16 Nov 2021 14:37:48 +0100 Subject: [PATCH 017/127] GoogleTest doesn't allow calling ASSERT from a constructor src/missing_deps_test.cc:36:5: error: returning a value from a constructor 36 | ASSERT_EQ("", err); | ^~~~~~~~~ --- src/missing_deps_test.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/missing_deps_test.cc b/src/missing_deps_test.cc index db66885d07..3cc4d287e3 100644 --- a/src/missing_deps_test.cc +++ b/src/missing_deps_test.cc @@ -33,7 +33,7 @@ struct MissingDependencyScannerTest : public testing::Test { scanner_(&delegate_, &deps_log_, &state_, &filesystem_) { std::string err; deps_log_.OpenForWrite(kTestDepsLogFilename, &err); - ASSERT_EQ("", err); + EXPECT_EQ("", err); } MissingDependencyScanner& scanner() { return scanner_; } @@ -159,4 +159,3 @@ TEST_F(MissingDependencyScannerTest, CycleInGraph) { std::vector nodes = state_.RootNodes(&err); ASSERT_NE("", err); } - From 40a57813bd9a2551d8377f085974074d1aefc6cb Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 15:01:50 -1000 Subject: [PATCH 018/127] Make chrono optional, and enable updated __cplusplus on MSVC --- CMakeLists.txt | 1 + configure.py | 3 ++- src/metrics.cc | 29 +++++++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1f4e160cc5..b9a8af6192 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,6 +20,7 @@ set(CMAKE_CXX_STANDARD 11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) + # These settings seem to be completely ignored - see configure.py for the actual options. 
add_compile_options(/W4 /wd4100 /wd4267 /wd4706 /wd4702 /wd4244 /GR- /Zc:__cplusplus) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) else() diff --git a/configure.py b/configure.py index 43904349a8..0ff171610c 100755 --- a/configure.py +++ b/configure.py @@ -325,7 +325,8 @@ def binary(name): '/wd4267', '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS', '/D_HAS_EXCEPTIONS=0', - '/DNINJA_PYTHON="%s"' % options.with_python] + '/DNINJA_PYTHON="%s"' % options.with_python, + '/Zc:__cplusplus'] if platform.msvc_needs_fs(): cflags.append('/FS') ldflags = ['/DEBUG', '/libpath:$builddir'] diff --git a/src/metrics.cc b/src/metrics.cc index 9a4dd12469..3b403a957d 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -14,12 +14,22 @@ #include "metrics.h" +#if __cplusplus >= 201103L +// C++ 11 is not available on older Linux build machines. +#define USE_CHRONO +#endif + #include #include #include #include + +#if defined(USE_CHRONO) #include +#else +#include +#endif #include "util.h" @@ -31,12 +41,20 @@ namespace { /// Compute a platform-specific high-res timer value that fits into an int64. int64_t HighResTimer() { +#if defined(USE_CHRONO) auto now = chrono::steady_clock::now(); return chrono::duration_cast( now.time_since_epoch()) .count(); +#else + timeval tv; + if (gettimeofday(&tv, NULL) < 0) + Fatal("gettimeofday: %s", strerror(errno)); + return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec; +#endif } +#if defined(USE_CHRONO) constexpr int64_t GetFrequency() { // If numerator isn't 1 then we lose precision and that will need to be // assessed. @@ -45,15 +63,26 @@ constexpr int64_t GetFrequency() { return std::chrono::steady_clock::period::den / std::chrono::steady_clock::period::num; } +#endif int64_t TimerToMicros(int64_t dt) { +#if defined(USE_CHRONO) // dt is in ticks. We want microseconds. return (dt * 1000000) / GetFrequency(); +#else + // No conversion necessary. + return dt; +#endif } int64_t TimerToMicros(double dt) { +#if defined(USE_CHRONO) // dt is in ticks. 
We want microseconds. return (dt * 1000000) / GetFrequency(); +#else + // No conversion necessary. + return dt; +#endif } } // anonymous namespace From 928151a5a2051f1b90386a8cc0960733a29c6b46 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 15:51:54 -1000 Subject: [PATCH 019/127] Enabling C++11 in configure.py --- CMakeLists.txt | 1 - configure.py | 1 + src/metrics.cc | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b9a8af6192..13c715256d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,6 @@ else() endif() # --- compiler flags -set(CMAKE_CXX_STANDARD 11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) diff --git a/configure.py b/configure.py index 0ff171610c..4c81e5d159 100755 --- a/configure.py +++ b/configure.py @@ -340,6 +340,7 @@ def binary(name): '-Wno-unused-parameter', '-fno-rtti', '-fno-exceptions', + '-std=c++11', '-fvisibility=hidden', '-pipe', '-DNINJA_PYTHON="%s"' % options.with_python] if options.debug: diff --git a/src/metrics.cc b/src/metrics.cc index 3b403a957d..9b43883435 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -28,6 +28,7 @@ #if defined(USE_CHRONO) #include #else +#error #include #endif From 809e67e5c333f2750909f9bcb21b405cf497deb3 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 16:03:16 -1000 Subject: [PATCH 020/127] Restore CMakeLists.txt change --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 13c715256d..b9a8af6192 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,6 +16,7 @@ else() endif() # --- compiler flags +set(CMAKE_CXX_STANDARD 11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) From a35da107da8991aaaa3a441f5bce1234ead86f9f Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 17:00:31 -1000 
Subject: [PATCH 021/127] Update CMake c++11 request per CR comment --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b9a8af6192..04e5c349d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ else() endif() # --- compiler flags -set(CMAKE_CXX_STANDARD 11) +target_compile_features(libninja PUBLIC cxx_std_11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) From 34ec9f550db8b764f05aca39b8c306b946b8c9cd Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 17:06:57 -1000 Subject: [PATCH 022/127] libninja -> ninja --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 04e5c349d4..125b21caf0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ else() endif() # --- compiler flags -target_compile_features(libninja PUBLIC cxx_std_11) +target_compile_features(ninja PUBLIC cxx_std_11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) From 01e7c50f508b47409f2554285237722f010d0720 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 17:12:39 -1000 Subject: [PATCH 023/127] Adjusting placement --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 125b21caf0..9e7818841b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,6 @@ else() endif() # --- compiler flags -target_compile_features(ninja PUBLIC cxx_std_11) if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) @@ -138,6 +137,8 @@ else() endif() endif() +target_compile_features(libninja PUBLIC cxx_std_11) + #Fixes GetActiveProcessorCount on MinGW if(MINGW) target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 
__USE_MINGW_ANSI_STDIO=1) From 65c82f4b99f29db594d6c65fee87f659d0cf94c0 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 17 Feb 2022 18:51:53 -1000 Subject: [PATCH 024/127] Cleanup --- CMakeLists.txt | 3 ++- configure.py | 6 ++++-- src/metrics.cc | 30 ------------------------------ 3 files changed, 6 insertions(+), 33 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9e7818841b..69ce1abb2c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,7 +19,8 @@ endif() if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) - # These settings seem to be completely ignored - see configure.py for the actual options. + # Note that these settings are separately specified in configure.py, and + # these lists should be kept in sync. add_compile_options(/W4 /wd4100 /wd4267 /wd4706 /wd4702 /wd4244 /GR- /Zc:__cplusplus) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) else() diff --git a/configure.py b/configure.py index 4c81e5d159..99a2c86e35 100755 --- a/configure.py +++ b/configure.py @@ -305,6 +305,8 @@ def binary(name): else: n.variable('ar', configure_env.get('AR', 'ar')) +# Note that build settings are separately specified in CMakeLists.txt and +# these lists should be kept in sync. if platform.is_msvc(): cflags = ['/showIncludes', '/nologo', # Don't print startup banner. @@ -320,13 +322,13 @@ def binary(name): # Disable warnings about ignored typedef in DbgHelp.h '/wd4091', '/GR-', # Disable RTTI. + '/Zc:__cplusplus', # Disable size_t -> int truncation warning. # We never have strings or arrays larger than 2**31. 
'/wd4267', '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS', '/D_HAS_EXCEPTIONS=0', - '/DNINJA_PYTHON="%s"' % options.with_python, - '/Zc:__cplusplus'] + '/DNINJA_PYTHON="%s"' % options.with_python] if platform.msvc_needs_fs(): cflags.append('/FS') ldflags = ['/DEBUG', '/libpath:$builddir'] diff --git a/src/metrics.cc b/src/metrics.cc index 9b43883435..9a4dd12469 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -14,23 +14,12 @@ #include "metrics.h" -#if __cplusplus >= 201103L -// C++ 11 is not available on older Linux build machines. -#define USE_CHRONO -#endif - #include #include #include #include - -#if defined(USE_CHRONO) #include -#else -#error -#include -#endif #include "util.h" @@ -42,20 +31,12 @@ namespace { /// Compute a platform-specific high-res timer value that fits into an int64. int64_t HighResTimer() { -#if defined(USE_CHRONO) auto now = chrono::steady_clock::now(); return chrono::duration_cast( now.time_since_epoch()) .count(); -#else - timeval tv; - if (gettimeofday(&tv, NULL) < 0) - Fatal("gettimeofday: %s", strerror(errno)); - return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec; -#endif } -#if defined(USE_CHRONO) constexpr int64_t GetFrequency() { // If numerator isn't 1 then we lose precision and that will need to be // assessed. @@ -64,26 +45,15 @@ constexpr int64_t GetFrequency() { return std::chrono::steady_clock::period::den / std::chrono::steady_clock::period::num; } -#endif int64_t TimerToMicros(int64_t dt) { -#if defined(USE_CHRONO) // dt is in ticks. We want microseconds. return (dt * 1000000) / GetFrequency(); -#else - // No conversion necessary. - return dt; -#endif } int64_t TimerToMicros(double dt) { -#if defined(USE_CHRONO) // dt is in ticks. We want microseconds. return (dt * 1000000) / GetFrequency(); -#else - // No conversion necessary. 
- return dt; -#endif } } // anonymous namespace From 24d1f5f0c130338b382c60eb32731d2638b47d03 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Mon, 7 Mar 2022 16:48:04 +0000 Subject: [PATCH 025/127] Address review comments 1. Move EdgePriorityQueue to graph.h and inherit from priority_queue 2. Add comment about edge->critical_time() --- src/build.cc | 18 ++++++------------ src/build.h | 36 ------------------------------------ src/graph.h | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 48 deletions(-) diff --git a/src/build.cc b/src/build.cc index 6f9cf27dcd..d747b8a50d 100644 --- a/src/build.cc +++ b/src/build.cc @@ -76,15 +76,6 @@ bool DryRunCommandRunner::WaitForCommand(Result* result) { } // namespace -bool EdgeQueue::EdgePriorityCompare::operator()(const Edge* e1, const Edge* e2) const { - const int64_t ct1 = e1->critical_time(); - const int64_t ct2 = e2->critical_time(); - if (ct1 != ct2) { - return ct1 < ct2; - } - return e1->id_ < e2->id_; -} - Plan::Plan(Builder* builder) : builder_(builder) , command_edges_(0) @@ -162,7 +153,10 @@ void Plan::EdgeWanted(const Edge* edge) { Edge* Plan::FindWork() { if (ready_.empty()) return NULL; - return ready_.pop(); + + Edge* work = ready_.top(); + ready_.pop(); + return work; } void Plan::ScheduleWork(map::iterator want_e) { @@ -182,7 +176,7 @@ void Plan::ScheduleWork(map::iterator want_e) { pool->DelayEdge(edge); EdgeSet new_edges; pool->RetrieveReadyEdges(&new_edges); - ready_.push(new_edges.begin(), new_edges.end()); + ready_.push_multiple(new_edges.begin(), new_edges.end()); } else { pool->EdgeScheduled(*edge); ready_.push(edge); @@ -199,7 +193,7 @@ bool Plan::EdgeFinished(Edge* edge, EdgeResult result, string* err) { edge->pool()->EdgeFinished(*edge); EdgeSet new_edges; edge->pool()->RetrieveReadyEdges(&new_edges); - ready_.push(new_edges.begin(), new_edges.end()); + ready_.push_multiple(new_edges.begin(), new_edges.end()); // The rest of this function only applies to successful commands. 
if (result != kEdgeSucceeded) diff --git a/src/build.h b/src/build.h index 9b49ffb238..652bc40d48 100644 --- a/src/build.h +++ b/src/build.h @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -36,41 +35,6 @@ struct State; struct Status; -// Set of ready edges, sorted by priority -class EdgeQueue { - struct EdgePriorityCompare { - bool operator()(const Edge* e1, const Edge* e2) const; - }; - - std::priority_queue, EdgePriorityCompare> queue_; - - public: - void push(Edge* edge) { - queue_.push(edge); - } - - template - void push(It first, It last) { - for (; first != last; ++first) { - push(*first); - } - } - - Edge* pop() { - Edge* ret = queue_.top(); - queue_.pop(); - return ret; - } - - void clear() { - queue_ = std::priority_queue, EdgePriorityCompare>(); - } - - size_t size() const { return queue_.size(); } - - bool empty() const { return queue_.empty(); } -}; - /// Plan stores the state of a build plan: what we intend to build, /// which steps we're ready to execute. struct Plan { diff --git a/src/graph.h b/src/graph.h index 7851504ff6..a8bf0cde64 100644 --- a/src/graph.h +++ b/src/graph.h @@ -18,6 +18,7 @@ #include #include #include +#include #include "dyndep.h" #include "eval_env.h" @@ -172,6 +173,10 @@ struct Edge { void Dump(const char* prefix="") const; + // Critical time is the estimated exection time in ms of the edges + // forming the longest time-weighted path to the target output. + // This quantity is used as a priority during build scheduling. 
+ // NOTE: Defaults to -1 as a marker smaller than any valid time int64_t critical_time() const { return critical_time_; } void set_critical_time(int64_t critical_time) { critical_time_ = critical_time; @@ -343,4 +348,32 @@ struct DependencyScan { DyndepLoader dyndep_loader_; }; +// Prioritize edges by largest critical time first +struct EdgePriorityCompare { + bool operator()(const Edge* e1, const Edge* e2) const { + const int64_t ct1 = e1->critical_time(); + const int64_t ct2 = e2->critical_time(); + if (ct1 != ct2) { + return ct1 < ct2; + } + return e1->id_ < e2->id_; + } +}; + +// Set of ready edges, sorted by priority +class EdgeQueue: + public std::priority_queue, EdgePriorityCompare>{ +public: + void clear() { + c.clear(); + } + + template + void push_multiple(It first, It last) { + for (; first != last; ++first) { + push(*first); + } + } +}; + #endif // NINJA_GRAPH_H_ From 6ee904948f25379d63941c1b23127705d9e0d9b0 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Mon, 7 Mar 2022 16:56:47 +0000 Subject: [PATCH 026/127] Remove unnecessary whitespace --- src/build.cc | 1 - src/build.h | 1 - 2 files changed, 2 deletions(-) diff --git a/src/build.cc b/src/build.cc index afe490d4b0..d6be1c035c 100644 --- a/src/build.cc +++ b/src/build.cc @@ -75,7 +75,6 @@ bool DryRunCommandRunner::WaitForCommand(Result* result) { } // namespace - Plan::Plan(Builder* builder) : builder_(builder) , command_edges_(0) diff --git a/src/build.h b/src/build.h index 652bc40d48..e91edbed66 100644 --- a/src/build.h +++ b/src/build.h @@ -34,7 +34,6 @@ struct Node; struct State; struct Status; - /// Plan stores the state of a build plan: what we intend to build, /// which steps we're ready to execute. 
struct Plan { From 1128a56353cc596a86be4943be88c403663296f3 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Mon, 7 Mar 2022 17:36:38 +0000 Subject: [PATCH 027/127] Add simple test for EdgeQueue --- src/graph.h | 2 +- src/graph_test.cc | 53 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/src/graph.h b/src/graph.h index 6e22f161db..0604ea73f3 100644 --- a/src/graph.h +++ b/src/graph.h @@ -384,7 +384,7 @@ struct EdgePriorityCompare { if (ct1 != ct2) { return ct1 < ct2; } - return e1->id_ < e2->id_; + return e1->id_ > e2->id_; } }; diff --git a/src/graph_test.cc b/src/graph_test.cc index 5314bc5f5f..97726cee01 100644 --- a/src/graph_test.cc +++ b/src/graph_test.cc @@ -944,3 +944,56 @@ TEST_F(GraphTest, PhonyDepsMtimes) { EXPECT_EQ(out1->mtime(), out1Mtime1); EXPECT_TRUE(out1->dirty()); } + +// Test that EdgeQueue correctly prioritizes by critical time +TEST_F(GraphTest, EdgeQueuePriority) { + + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"rule r\n" +" command = unused\n" +"build out1: r in1\n" +"build out2: r in2\n" +"build out3: r in3\n" +)); + + const int n_edges = 3; + Edge *(edges)[n_edges] = { + GetNode("out1")->in_edge(), + GetNode("out2")->in_edge(), + GetNode("out3")->in_edge(), + }; + + // Output is largest critical time to smallest + for (int i = 0; i < n_edges; ++i) { + edges[i]->set_critical_time(i * 10); + } + + EdgeQueue queue; + for (int i = 0; i < n_edges; ++i) { + queue.push(edges[i]); + } + + EXPECT_EQ(queue.size(), n_edges); + for (int i = 0; i < n_edges; ++i) { + EXPECT_EQ(queue.top(), edges[n_edges - 1 - i]); + queue.pop(); + } + EXPECT_TRUE(queue.empty()); + + // When there is ambiguity, the lowest edge id comes first + for (int i = 0; i < n_edges; ++i) { + edges[i]->set_critical_time(0); + } + + queue.push(edges[1]); + queue.push(edges[2]); + queue.push(edges[0]); + + for (int i = 0; i < n_edges; ++i) { + EXPECT_EQ(queue.top(), edges[i]); + queue.pop(); + } + 
EXPECT_TRUE(queue.empty()); +} + + From a8611647de41d52a05ee9e85a767e59d1c923ce6 Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Mon, 7 Mar 2022 20:42:53 +0000 Subject: [PATCH 028/127] Improve comments and retrieve edges into ready_queue directly --- src/build.cc | 24 ++++++++++-------------- src/build.h | 4 ++-- src/graph.h | 23 ++++++++++++----------- src/graph_test.cc | 2 +- src/state.cc | 4 ++-- src/state.h | 2 +- 6 files changed, 28 insertions(+), 31 deletions(-) diff --git a/src/build.cc b/src/build.cc index d6be1c035c..de62a62a0a 100644 --- a/src/build.cc +++ b/src/build.cc @@ -173,9 +173,7 @@ void Plan::ScheduleWork(map::iterator want_e) { Pool* pool = edge->pool(); if (pool->ShouldDelayEdge()) { pool->DelayEdge(edge); - EdgeSet new_edges; - pool->RetrieveReadyEdges(&new_edges); - ready_.push_multiple(new_edges.begin(), new_edges.end()); + pool->RetrieveReadyEdges(&ready_); } else { pool->EdgeScheduled(*edge); ready_.push(edge); @@ -190,9 +188,7 @@ bool Plan::EdgeFinished(Edge* edge, EdgeResult result, string* err) { // See if this job frees up any delayed jobs. if (directly_wanted) edge->pool()->EdgeFinished(*edge); - EdgeSet new_edges; - edge->pool()->RetrieveReadyEdges(&new_edges); - ready_.push_multiple(new_edges.begin(), new_edges.end()); + edge->pool()->RetrieveReadyEdges(&ready_); // The rest of this function only applies to successful commands. if (result != kEdgeSucceeded) @@ -541,10 +537,10 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. 
// XXX: ignores pools - std::queue breadthFirstEdges; // Queue, for breadth-first traversal - std::set active_edges; // Set of in breadthFirstEdges + std::queue work_queue; // Queue, for breadth-first traversal + std::set active_edges; // Set of edges in work_queue SeenBefore seen_edge( - &active_edges); // Test for uniqueness in breadthFirstEdges + &active_edges); // Test for uniqueness in work_queue for (std::vector::reverse_iterator it = targets_.rbegin(), end = targets_.rend(); @@ -557,14 +553,14 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { priority_weight + std::max(in->run_time_ms_, in->critical_time())); if (!seen_edge(in)) { - breadthFirstEdges.push(in); + work_queue.push(in); } } } - while (!breadthFirstEdges.empty()) { - Edge* e = breadthFirstEdges.front(); - breadthFirstEdges.pop(); + while (!work_queue.empty()) { + Edge* e = work_queue.front(); + work_queue.pop(); active_edges.erase(e); for (std::vector::iterator it = e->inputs_.begin(), @@ -579,7 +575,7 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { if (proposed_time > in->critical_time()) { in->set_critical_time(proposed_time); if (!seen_edge(in)) { - breadthFirstEdges.push(in); + work_queue.push(in); } } } diff --git a/src/build.h b/src/build.h index e91edbed66..0d7c01a49b 100644 --- a/src/build.h +++ b/src/build.h @@ -22,7 +22,7 @@ #include #include "depfile_parser.h" -#include "graph.h" // XXX needed for DependencyScan; should rearrange. +#include "graph.h" #include "exit_status.h" #include "util.h" // int64_t @@ -120,7 +120,7 @@ struct Plan { /// we want for the edge. 
std::map want_; - EdgeQueue ready_; + EdgePriorityQueue ready_; Builder* builder_; /// user provided targets in build order, earlier one have higher priority diff --git a/src/graph.h b/src/graph.h index 0604ea73f3..4b45dad0ff 100644 --- a/src/graph.h +++ b/src/graph.h @@ -197,7 +197,7 @@ struct Edge { void Dump(const char* prefix="") const; - // Critical time is the estimated exection time in ms of the edges + // Critical time is the estimated execution time in ms of the edges // forming the longest time-weighted path to the target output. // This quantity is used as a priority during build scheduling. // NOTE: Defaults to -1 as a marker smaller than any valid time @@ -376,7 +376,13 @@ struct DependencyScan { DyndepLoader dyndep_loader_; }; -// Prioritize edges by largest critical time first +// Implements a less comarison for edges by priority, where highest +// priority is defined lexicographically first by largest critical +// time, then lowest ID. +// +// Including ID means that wherever the critical times are the same, +// the edges are executed in ascending ID order which was historically +// how all tasks were scheduled. struct EdgePriorityCompare { bool operator()(const Edge* e1, const Edge* e2) const { const int64_t ct1 = e1->critical_time(); @@ -388,20 +394,15 @@ struct EdgePriorityCompare { } }; -// Set of ready edges, sorted by priority -class EdgeQueue: +// A priority queue holding non-owning Edge pointers. top() will +// return the edge with the largest critical time, and lowest ID if +// more than one edge has the same critical time. 
+class EdgePriorityQueue: public std::priority_queue, EdgePriorityCompare>{ public: void clear() { c.clear(); } - - template - void push_multiple(It first, It last) { - for (; first != last; ++first) { - push(*first); - } - } }; #endif // NINJA_GRAPH_H_ diff --git a/src/graph_test.cc b/src/graph_test.cc index 97726cee01..d6573873d5 100644 --- a/src/graph_test.cc +++ b/src/graph_test.cc @@ -968,7 +968,7 @@ TEST_F(GraphTest, EdgeQueuePriority) { edges[i]->set_critical_time(i * 10); } - EdgeQueue queue; + EdgePriorityQueue queue; for (int i = 0; i < n_edges; ++i) { queue.push(edges[i]); } diff --git a/src/state.cc b/src/state.cc index 556b0d8802..e194519d68 100644 --- a/src/state.cc +++ b/src/state.cc @@ -38,13 +38,13 @@ void Pool::DelayEdge(Edge* edge) { delayed_.insert(edge); } -void Pool::RetrieveReadyEdges(EdgeSet* ready_queue) { +void Pool::RetrieveReadyEdges(EdgePriorityQueue* ready_queue) { DelayedEdges::iterator it = delayed_.begin(); while (it != delayed_.end()) { Edge* edge = *it; if (current_use_ + edge->weight() > depth_) break; - ready_queue->insert(edge); + ready_queue->push(edge); EdgeScheduled(*edge); ++it; } diff --git a/src/state.h b/src/state.h index 878ac6d991..2b0fa908e4 100644 --- a/src/state.h +++ b/src/state.h @@ -62,7 +62,7 @@ struct Pool { void DelayEdge(Edge* edge); /// Pool will add zero or more edges to the ready_queue - void RetrieveReadyEdges(EdgeSet* ready_queue); + void RetrieveReadyEdges(EdgePriorityQueue* ready_queue); /// Dump the Pool and its edges (useful for debugging). 
void Dump() const; From 026498fb36b67518501eec2edc66ddcd64f64b1f Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Mon, 7 Mar 2022 21:31:07 +0000 Subject: [PATCH 029/127] Add run_time_ms accessors and more comments --- src/build.cc | 28 ++++++++++++++-------------- src/graph.h | 8 ++++++++ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/src/build.cc b/src/build.cc index de62a62a0a..ec71bf3dbb 100644 --- a/src/build.cc +++ b/src/build.cc @@ -452,7 +452,7 @@ struct SeenBefore { } }; -// Assign run_time_ms_ for all wanted edges, and returns total time for all edges +// Assign run_time_ms for all wanted edges, and returns total time for all edges // For phony edges, 0 cost. // For edges with a build history, use the last build time. // For edges without history, use the 75th percentile time for edges with history. @@ -462,6 +462,7 @@ int64_t AssignEdgeRuntime(BuildLog* build_log, bool missing_durations = false; std::vector durations; int64_t total_time = 0; + const int64_t kUnknownRunTime = -1; // marker value for the two loops below. 
for (std::map::const_iterator it = want.begin(), end = want.end(); @@ -474,11 +475,11 @@ int64_t AssignEdgeRuntime(BuildLog* build_log, build_log->LookupByOutput(edge->outputs_[0]->path()); if (!entry) { missing_durations = true; - edge->run_time_ms_ = -1; // -1 to mark as needing filled in + edge->set_run_time_ms(kUnknownRunTime); // mark as needing filled in continue; } const int64_t duration = entry->end_time - entry->start_time; - edge->run_time_ms_ = duration; + edge->set_run_time_ms(duration); total_time += duration; durations.push_back(duration); } @@ -504,10 +505,10 @@ int64_t AssignEdgeRuntime(BuildLog* build_log, end = want.end(); it != end; ++it) { Edge* edge = it->first; - if (edge->run_time_ms_ >= 0) { + if (edge->run_time_ms() != kUnknownRunTime) { continue; } - edge->run_time_ms_ = p75_time; + edge->set_run_time_ms(p75_time); total_time += p75_time; } return total_time; @@ -542,16 +543,15 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { SeenBefore seen_edge( &active_edges); // Test for uniqueness in work_queue - for (std::vector::reverse_iterator it = targets_.rbegin(), - end = targets_.rend(); - it != end; ++it) { - if (Edge* in = (*it)->in_edge()) { - // Use initial critical time: total_time * N. This means higher - // priority targets always get a higher critical time value - int64_t priority_weight = (it - targets_.rbegin()) * total_time; + for (size_t i = 0; i < targets_.size(); ++i) { + const Node* target = targets_[i]; + if (Edge* in = target->in_edge()) { + // Add a bias to ensure that targets that appear first in |targets_| have a larger critical time than + // those that follow them. E.g. for 3 targets: [2*total_time, total_time, 0]. 
+ int64_t priority_weight = (targets_.size() - i - 1) * total_time; in->set_critical_time( priority_weight + - std::max(in->run_time_ms_, in->critical_time())); + std::max(in->run_time_ms(), in->critical_time())); if (!seen_edge(in)) { work_queue.push(in); } @@ -571,7 +571,7 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { continue; } // Only process edge if this node offers a higher critical time - const int64_t proposed_time = e->critical_time() + in->run_time_ms_; + const int64_t proposed_time = e->critical_time() + in->run_time_ms(); if (proposed_time > in->critical_time()) { in->set_critical_time(proposed_time); if (!seen_edge(in)) { diff --git a/src/graph.h b/src/graph.h index 4b45dad0ff..728cdc8ae6 100644 --- a/src/graph.h +++ b/src/graph.h @@ -206,6 +206,14 @@ struct Edge { critical_time_ = critical_time; } + // Run time in ms for this edge's command. + // Taken from the build log if present, or estimated otherwise. + // Default initialized to 0. + int64_t run_time_ms() const { return run_time_ms_; } + void set_run_time_ms(int64_t run_time_ms) { + run_time_ms_ = run_time_ms; + } + const Rule* rule_; Pool* pool_; std::vector inputs_; From 4bd8db1fa8287ec0b828e98a7d2bcdf3d6b1904f Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Tue, 8 Mar 2022 01:40:43 +0000 Subject: [PATCH 030/127] Add test and fix priority bug AddTarget cannot add edges to the ready queue before the critical time has been computed. 
--- src/build.cc | 69 ++++++++++++++--- src/build.h | 9 ++- src/build_test.cc | 190 +++++++++++++++++++++++++++++++++++++++------- 3 files changed, 231 insertions(+), 37 deletions(-) diff --git a/src/build.cc b/src/build.cc index ec71bf3dbb..1f1e153ef9 100644 --- a/src/build.cc +++ b/src/build.cc @@ -124,8 +124,6 @@ bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err, if (node->dirty() && want == kWantNothing) { want = kWantToStart; EdgeWanted(edge); - if (!dyndep_walk && edge->AllInputsReady()) - ScheduleWork(want_ins.first); } if (dyndep_walk) @@ -514,14 +512,27 @@ int64_t AssignEdgeRuntime(BuildLog* build_log, return total_time; } +int64_t AssignDefaultEdgeRuntime(std::map &want) { + int64_t total_time = 0; + + for (std::map::const_iterator it = want.begin(), + end = want.end(); + it != end; ++it) { + Edge* edge = it->first; + if (edge->is_phony()) { + continue; + } + + edge->set_run_time_ms(1); + ++total_time; + } + return total_time; +} + } // namespace void Plan::ComputeCriticalTime(BuildLog* build_log) { - // testcases have no build_log - if (!build_log) - return; - - METRIC_RECORD("ComputePriorityList"); + METRIC_RECORD("ComputeCriticalTime"); // Remove duplicate targets { std::set seen; @@ -533,7 +544,10 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // total time if building all edges in serial. This value is big // enough to ensure higher priority target's initial critical time // is always bigger than lower ones - int64_t total_time = AssignEdgeRuntime(build_log, want_); + const int64_t total_time = build_log ? + AssignEdgeRuntime(build_log, want_) : + AssignDefaultEdgeRuntime(want_); // Plan tests have no build_log + // Use backflow algorithm to compute critical times for all nodes, starting // from the destination nodes. @@ -582,6 +596,42 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { } } +void Plan::ScheduleInitialEdges() { + // Add ready edges to queue. 
+ assert(ready_.empty()); + std::set pools; + + for (std::map::iterator it = want_.begin(), + end = want_.end(); it != end; ++it) { + Edge* edge = it->first; + Plan::Want want = it->second; + if (!(want == kWantToStart && edge->AllInputsReady())) { + continue; + } + + Pool* pool = edge->pool(); + if (pool->ShouldDelayEdge()) { + pool->DelayEdge(edge); + pools.insert(pool); + } else { + ScheduleWork(it); + } + } + + // Call RetrieveReadyEdges only once at the end so higher priority + // edges are retrieved first, not the ones that happen to be first + // in the want_ map. + for (std::set::iterator it=pools.begin(), + end = pools.end(); it != end; ++it) { + (*it)->RetrieveReadyEdges(&ready_); + } +} + +void Plan::PrepareQueue(BuildLog* build_log) { + ComputeCriticalTime(build_log); + ScheduleInitialEdges(); +} + void Plan::Dump() const { printf("pending: %d\n", (int)want_.size()); for (map::const_iterator e = want_.begin(); e != want_.end(); ++e) { @@ -743,8 +793,7 @@ bool Builder::AlreadyUpToDate() const { bool Builder::Build(string* err) { assert(!AlreadyUpToDate()); - - plan_.ComputeCriticalTime(scan_.build_log()); + plan_.PrepareQueue(scan_.build_log()); status_->PlanHasTotalEdges(plan_.command_edge_count()); int pending_commands = 0; diff --git a/src/build.h b/src/build.h index 0d7c01a49b..7719d9a9a4 100644 --- a/src/build.h +++ b/src/build.h @@ -74,7 +74,9 @@ struct Plan { /// Reset state. Clears want and ready sets. void Reset(); - void ComputeCriticalTime(BuildLog* build_log); + + // After all targets have been added, prepares the ready queue for find work. + void PrepareQueue(BuildLog* build_log); /// Update the build plan to account for modifications made to the graph /// by information loaded from a dyndep file. 
@@ -95,11 +97,16 @@ struct Plan { }; private: + void ComputeCriticalTime(BuildLog* build_log); bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err); void UnmarkDependents(const Node* node, std::set* dependents); bool AddSubTarget(const Node* node, const Node* dependent, std::string* err, std::set* dyndep_walk); + // Add edges that kWantToStart into the ready queue + // Must be called after ComputeCriticalTime and before FindWork + void ScheduleInitialEdges(); + /// Update plan with knowledge that the given node is up to date. /// If the node is a dyndep binding on any of its dependents, this /// loads dynamic dependencies from the node's path. diff --git a/src/build_test.cc b/src/build_test.cc index 4ef62b2113..50a53948b8 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -50,6 +50,14 @@ struct PlanTest : public StateTestWithBuiltinRules { sort(ret->begin(), ret->end(), CompareEdgesByOutput::cmp); } + void PrepareForTarget(const char* node, BuildLog *log=NULL) { + string err; + EXPECT_TRUE(plan_.AddTarget(GetNode(node), &err)); + ASSERT_EQ("", err); + plan_.PrepareQueue(log); + ASSERT_TRUE(plan_.more_to_do()); + } + void TestPoolWithDepthOne(const char *test_case); }; @@ -59,10 +67,7 @@ TEST_F(PlanTest, Basic) { "build mid: cat in\n")); GetNode("mid")->MarkDirty(); GetNode("out")->MarkDirty(); - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err)); - ASSERT_EQ("", err); - ASSERT_TRUE(plan_.more_to_do()); + PrepareForTarget("out"); Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge); @@ -71,6 +76,7 @@ TEST_F(PlanTest, Basic) { ASSERT_FALSE(plan_.FindWork()); + string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); @@ -95,15 +101,12 @@ TEST_F(PlanTest, DoubleOutputDirect) { GetNode("mid1")->MarkDirty(); GetNode("mid2")->MarkDirty(); GetNode("out")->MarkDirty(); - - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err)); - ASSERT_EQ("", err); - ASSERT_TRUE(plan_.more_to_do()); + 
PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in + string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); @@ -128,14 +131,12 @@ TEST_F(PlanTest, DoubleOutputIndirect) { GetNode("b1")->MarkDirty(); GetNode("b2")->MarkDirty(); GetNode("out")->MarkDirty(); - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err)); - ASSERT_EQ("", err); - ASSERT_TRUE(plan_.more_to_do()); + PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in + string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); @@ -169,15 +170,12 @@ TEST_F(PlanTest, DoubleDependent) { GetNode("a1")->MarkDirty(); GetNode("a2")->MarkDirty(); GetNode("out")->MarkDirty(); - - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err)); - ASSERT_EQ("", err); - ASSERT_TRUE(plan_.more_to_do()); + PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in + string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); @@ -209,6 +207,7 @@ void PlanTest::TestPoolWithDepthOne(const char* test_case) { ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); + plan_.PrepareQueue(NULL); ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); @@ -284,10 +283,7 @@ TEST_F(PlanTest, PoolsWithDepthTwo) { GetNode("outb" + string(1, '1' + static_cast(i)))->MarkDirty(); } GetNode("allTheThings")->MarkDirty(); - - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("allTheThings"), &err)); - ASSERT_EQ("", err); + PrepareForTarget("allTheThings"); deque edges; FindWorkSorted(&edges, 5); @@ -306,6 +302,7 @@ TEST_F(PlanTest, PoolsWithDepthTwo) { ASSERT_EQ("outb3", edge->outputs_[0]->path()); // finish out1 + string err; plan_.EdgeFinished(edges.front(), Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edges.pop_front(); @@ -363,10 +360,7 @@ TEST_F(PlanTest, PoolWithRedundantEdges) { 
GetNode("bar.cpp.obj")->MarkDirty(); GetNode("libfoo.a")->MarkDirty(); GetNode("all")->MarkDirty(); - string err; - EXPECT_TRUE(plan_.AddTarget(GetNode("all"), &err)); - ASSERT_EQ("", err); - ASSERT_TRUE(plan_.more_to_do()); + PrepareForTarget("all"); Edge* edge = NULL; @@ -375,6 +369,7 @@ TEST_F(PlanTest, PoolWithRedundantEdges) { edge = initial_edges[1]; // Foo first ASSERT_EQ("foo.cpp", edge->outputs_[0]->path()); + string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); @@ -439,6 +434,7 @@ TEST_F(PlanTest, PoolWithFailingEdge) { ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); + plan_.PrepareQueue(NULL); ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); @@ -467,6 +463,148 @@ TEST_F(PlanTest, PoolWithFailingEdge) { ASSERT_EQ(0, edge); } +TEST_F(PlanTest, PriorityWithoutBuildLog) { + // Without a build log, the critical time is equivalent to graph + // depth. Test with the following graph: + // a2 + // | + // a1 b1 + // | | | + // a0 b0 c0 + // \ | / + // out + + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, + "rule r\n" + " command = unused\n" + "build out: r a0 b0 c0\n" + "build a0: r a1\n" + "build a1: r a2\n" + "build b0: r b1\n" + "build c0: r b1\n" + )); + GetNode("a1")->MarkDirty(); + GetNode("a0")->MarkDirty(); + GetNode("b0")->MarkDirty(); + GetNode("c0")->MarkDirty(); + GetNode("out")->MarkDirty(); + BuildLog log; + PrepareForTarget("out", &log); + + EXPECT_EQ(GetNode("out")->in_edge()->critical_time(), 1); + EXPECT_EQ(GetNode("a0")->in_edge()->critical_time(), 2); + EXPECT_EQ(GetNode("b0")->in_edge()->critical_time(), 2); + EXPECT_EQ(GetNode("c0")->in_edge()->critical_time(), 2); + EXPECT_EQ(GetNode("a1")->in_edge()->critical_time(), 3); + + const int n_edges = 5; + const char *expected_order[n_edges] = { + "a1", "a0", "b0", "c0", "out"}; + for (int i = 0; i < n_edges; ++i) { + Edge* edge = plan_.FindWork(); + ASSERT_NE(edge, NULL); + EXPECT_EQ(expected_order[i], 
edge->outputs_[0]->path()); + + std::string err; + ASSERT_TRUE(plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err)); + EXPECT_EQ(err, ""); + } + + EXPECT_FALSE(plan_.FindWork()); +} + +TEST_F(PlanTest, PriorityWithBuildLog) { + // With a build log, the critical time is longest weighted path. + // Test with the following graph: + // a2 + // | + // a1 b1 + // | | | + // a0 b0 c0 + // \ | / + // out + + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, + "rule r\n" + " command = unused\n" + "build out: r a0 b0 c0\n" + "build a0: r a1\n" + "build a1: r a2\n" + "build b0: r b1\n" + "build c0: r b1\n" + )); + GetNode("a1")->MarkDirty(); + GetNode("a0")->MarkDirty(); + GetNode("b0")->MarkDirty(); + GetNode("c0")->MarkDirty(); + GetNode("out")->MarkDirty(); + + BuildLog log; + log.RecordCommand(GetNode("out")->in_edge(), 0, 100); // time = 100 + log.RecordCommand(GetNode("a0")->in_edge(), 10, 20); // time = 10 + log.RecordCommand(GetNode("a1")->in_edge(), 20, 40); // time = 20 + log.RecordCommand(GetNode("b0")->in_edge(), 10, 30); // time = 20 + log.RecordCommand(GetNode("c0")->in_edge(), 20, 70); // time = 50 + + PrepareForTarget("out", &log); + + EXPECT_EQ(GetNode("out")->in_edge()->critical_time(), 100); + EXPECT_EQ(GetNode("a0")->in_edge()->critical_time(), 110); + EXPECT_EQ(GetNode("b0")->in_edge()->critical_time(), 120); + EXPECT_EQ(GetNode("c0")->in_edge()->critical_time(), 150); + EXPECT_EQ(GetNode("a1")->in_edge()->critical_time(), 130); + + const int n_edges = 5; + const char *expected_order[n_edges] = { + "c0", "a1", "b0", "a0", "out"}; + for (int i = 0; i < n_edges; ++i) { + Edge* edge = plan_.FindWork(); + ASSERT_NE(edge, NULL); + EXPECT_EQ(expected_order[i], edge->outputs_[0]->path()); + + std::string err; + ASSERT_TRUE(plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err)); + EXPECT_EQ(err, ""); + } + EXPECT_FALSE(plan_.FindWork()); +} + +TEST_F(PlanTest, RuntimePartialBuildLog) { + // Test the edge->run_time_ms() estimate when no build log is available + + 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, + "rule r\n" + " command = unused\n" + "build out: r a0 b0 c0 d0\n" + "build a0: r a1\n" + "build b0: r b1\n" + "build c0: r c1\n" + "build d0: r d1\n" + )); + GetNode("a0")->MarkDirty(); + GetNode("b0")->MarkDirty(); + GetNode("c0")->MarkDirty(); + GetNode("d0")->MarkDirty(); + GetNode("out")->MarkDirty(); + + BuildLog log; + log.RecordCommand(GetNode("out")->in_edge(), 0, 100); // time = 40 + log.RecordCommand(GetNode("a0")->in_edge(), 10, 20); // time = 10 + log.RecordCommand(GetNode("b0")->in_edge(), 20, 40); // time = 20 + log.RecordCommand(GetNode("c0")->in_edge(), 10, 40); // time = 30 + + PrepareForTarget("out", &log); + + // These edges times are read from the build log + EXPECT_EQ(GetNode("out")->in_edge()->run_time_ms(), 100); + EXPECT_EQ(GetNode("a0")->in_edge()->run_time_ms(), 10); + EXPECT_EQ(GetNode("b0")->in_edge()->run_time_ms(), 20); + EXPECT_EQ(GetNode("c0")->in_edge()->run_time_ms(), 30); + + // The missing data is taken from the 3rd quintile of known data + EXPECT_EQ(GetNode("d0")->in_edge()->run_time_ms(), 30); +} + /// Fake implementation of CommandRunner, useful for tests. struct FakeCommandRunner : public CommandRunner { explicit FakeCommandRunner(VirtualFileSystem* fs) : From a643af2207f8f090d2a7aefeec954fcd2725c39c Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Tue, 8 Mar 2022 02:44:03 +0000 Subject: [PATCH 031/127] Pool: sort equally-weighted edges by priority --- src/graph.h | 13 ++++++++++--- src/state.h | 5 ++++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/graph.h b/src/graph.h index 728cdc8ae6..4fc34ca91c 100644 --- a/src/graph.h +++ b/src/graph.h @@ -384,14 +384,14 @@ struct DependencyScan { DyndepLoader dyndep_loader_; }; -// Implements a less comarison for edges by priority, where highest +// Implements a less comparison for edges by priority, where highest // priority is defined lexicographically first by largest critical // time, then lowest ID. 
// // Including ID means that wherever the critical times are the same, // the edges are executed in ascending ID order which was historically // how all tasks were scheduled. -struct EdgePriorityCompare { +struct EdgePriorityLess { bool operator()(const Edge* e1, const Edge* e2) const { const int64_t ct1 = e1->critical_time(); const int64_t ct2 = e2->critical_time(); @@ -402,11 +402,18 @@ struct EdgePriorityCompare { } }; +// Reverse of EdgePriorityLess, e.g. to sort by highest priority first +struct EdgePriorityGreater { + bool operator()(const Edge* e1, const Edge* e2) const { + return EdgePriorityLess()(e2, e1); + } +}; + // A priority queue holding non-owning Edge pointers. top() will // return the edge with the largest critical time, and lowest ID if // more than one edge has the same critical time. class EdgePriorityQueue: - public std::priority_queue, EdgePriorityCompare>{ + public std::priority_queue, EdgePriorityLess>{ public: void clear() { c.clear(); diff --git a/src/state.h b/src/state.h index 2b0fa908e4..05fb50e59f 100644 --- a/src/state.h +++ b/src/state.h @@ -80,7 +80,10 @@ struct Pool { if (!a) return b; if (!b) return false; int weight_diff = a->weight() - b->weight(); - return ((weight_diff < 0) || (weight_diff == 0 && EdgeCmp()(a, b))); + if (weight_diff != 0) { + return weight_diff < 0; + } + return EdgePriorityGreater()(a, b); } }; From f2333b706a080389583e5e355fd339a37089fe2c Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Tue, 8 Mar 2022 04:52:53 +0000 Subject: [PATCH 032/127] Rename critical_time to critical_time_ms --- src/build.cc | 10 +++++----- src/build_test.cc | 20 ++++++++++---------- src/graph.h | 14 +++++++------- src/graph_test.cc | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/build.cc b/src/build.cc index 1f1e153ef9..04be16c7e6 100644 --- a/src/build.cc +++ b/src/build.cc @@ -563,9 +563,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // Add a bias to ensure that targets that appear first 
in |targets_| have a larger critical time than // those that follow them. E.g. for 3 targets: [2*total_time, total_time, 0]. int64_t priority_weight = (targets_.size() - i - 1) * total_time; - in->set_critical_time( + in->set_critical_time_ms( priority_weight + - std::max(in->run_time_ms(), in->critical_time())); + std::max(in->run_time_ms(), in->critical_time_ms())); if (!seen_edge(in)) { work_queue.push(in); } @@ -585,9 +585,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { continue; } // Only process edge if this node offers a higher critical time - const int64_t proposed_time = e->critical_time() + in->run_time_ms(); - if (proposed_time > in->critical_time()) { - in->set_critical_time(proposed_time); + const int64_t proposed_time = e->critical_time_ms() + in->run_time_ms(); + if (proposed_time > in->critical_time_ms()) { + in->set_critical_time_ms(proposed_time); if (!seen_edge(in)) { work_queue.push(in); } diff --git a/src/build_test.cc b/src/build_test.cc index 50a53948b8..e8518b47b4 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -491,11 +491,11 @@ TEST_F(PlanTest, PriorityWithoutBuildLog) { BuildLog log; PrepareForTarget("out", &log); - EXPECT_EQ(GetNode("out")->in_edge()->critical_time(), 1); - EXPECT_EQ(GetNode("a0")->in_edge()->critical_time(), 2); - EXPECT_EQ(GetNode("b0")->in_edge()->critical_time(), 2); - EXPECT_EQ(GetNode("c0")->in_edge()->critical_time(), 2); - EXPECT_EQ(GetNode("a1")->in_edge()->critical_time(), 3); + EXPECT_EQ(GetNode("out")->in_edge()->critical_time_ms(), 1); + EXPECT_EQ(GetNode("a0")->in_edge()->critical_time_ms(), 2); + EXPECT_EQ(GetNode("b0")->in_edge()->critical_time_ms(), 2); + EXPECT_EQ(GetNode("c0")->in_edge()->critical_time_ms(), 2); + EXPECT_EQ(GetNode("a1")->in_edge()->critical_time_ms(), 3); const int n_edges = 5; const char *expected_order[n_edges] = { @@ -548,11 +548,11 @@ TEST_F(PlanTest, PriorityWithBuildLog) { PrepareForTarget("out", &log); - EXPECT_EQ(GetNode("out")->in_edge()->critical_time(), 
100); - EXPECT_EQ(GetNode("a0")->in_edge()->critical_time(), 110); - EXPECT_EQ(GetNode("b0")->in_edge()->critical_time(), 120); - EXPECT_EQ(GetNode("c0")->in_edge()->critical_time(), 150); - EXPECT_EQ(GetNode("a1")->in_edge()->critical_time(), 130); + EXPECT_EQ(GetNode("out")->in_edge()->critical_time_ms(), 100); + EXPECT_EQ(GetNode("a0")->in_edge()->critical_time_ms(), 110); + EXPECT_EQ(GetNode("b0")->in_edge()->critical_time_ms(), 120); + EXPECT_EQ(GetNode("c0")->in_edge()->critical_time_ms(), 150); + EXPECT_EQ(GetNode("a1")->in_edge()->critical_time_ms(), 130); const int n_edges = 5; const char *expected_order[n_edges] = { diff --git a/src/graph.h b/src/graph.h index 4fc34ca91c..0fab807c9c 100644 --- a/src/graph.h +++ b/src/graph.h @@ -171,7 +171,7 @@ struct Edge { Edge() : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), - id_(0), run_time_ms_(0), critical_time_(-1), outputs_ready_(false), + id_(0), run_time_ms_(0), critical_time_ms_(-1), outputs_ready_(false), deps_loaded_(false), deps_missing_(false), generated_by_dep_loader_(false), implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {} @@ -201,9 +201,9 @@ struct Edge { // forming the longest time-weighted path to the target output. // This quantity is used as a priority during build scheduling. // NOTE: Defaults to -1 as a marker smaller than any valid time - int64_t critical_time() const { return critical_time_; } - void set_critical_time(int64_t critical_time) { - critical_time_ = critical_time; + int64_t critical_time_ms() const { return critical_time_ms_; } + void set_critical_time_ms(int64_t critical_time_ms) { + critical_time_ms_ = critical_time_ms; } // Run time in ms for this edge's command. @@ -224,7 +224,7 @@ struct Edge { VisitMark mark_; size_t id_; int64_t run_time_ms_; - int64_t critical_time_; + int64_t critical_time_ms_; bool outputs_ready_; bool deps_loaded_; bool deps_missing_; @@ -393,8 +393,8 @@ struct DependencyScan { // how all tasks were scheduled. 
struct EdgePriorityLess { bool operator()(const Edge* e1, const Edge* e2) const { - const int64_t ct1 = e1->critical_time(); - const int64_t ct2 = e2->critical_time(); + const int64_t ct1 = e1->critical_time_ms(); + const int64_t ct2 = e2->critical_time_ms(); if (ct1 != ct2) { return ct1 < ct2; } diff --git a/src/graph_test.cc b/src/graph_test.cc index d6573873d5..c7efec3284 100644 --- a/src/graph_test.cc +++ b/src/graph_test.cc @@ -965,7 +965,7 @@ TEST_F(GraphTest, EdgeQueuePriority) { // Output is largest critical time to smallest for (int i = 0; i < n_edges; ++i) { - edges[i]->set_critical_time(i * 10); + edges[i]->set_critical_time_ms(i * 10); } EdgePriorityQueue queue; @@ -982,7 +982,7 @@ TEST_F(GraphTest, EdgeQueuePriority) { // When there is ambiguity, the lowest edge id comes first for (int i = 0; i < n_edges; ++i) { - edges[i]->set_critical_time(0); + edges[i]->set_critical_time_ms(0); } queue.push(edges[1]); From 09d4faa0119a5d45911fd5a5b1703e152b040d9a Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Tue, 8 Mar 2022 05:04:43 +0000 Subject: [PATCH 033/127] Clarify the purpose of active_edges in back-propagation --- src/build.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/build.cc b/src/build.cc index 04be16c7e6..9c0dbc2d7e 100644 --- a/src/build.cc +++ b/src/build.cc @@ -553,9 +553,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { // from the destination nodes. // XXX: ignores pools std::queue work_queue; // Queue, for breadth-first traversal - std::set active_edges; // Set of edges in work_queue - SeenBefore seen_edge( - &active_edges); // Test for uniqueness in work_queue + // The set of edges currently in work_queue, to avoid duplicates. 
+ std::set active_edges; + SeenBefore seen_edge(&active_edges); for (size_t i = 0; i < targets_.size(); ++i) { const Node* target = targets_[i]; @@ -575,6 +575,8 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { while (!work_queue.empty()) { Edge* e = work_queue.front(); work_queue.pop(); + // If the critical time of any dependent edges is updated, this + // edge may need to be processed again. So re-allow insertion. active_edges.erase(e); for (std::vector::iterator it = e->inputs_.begin(), From b5fa2d589cd12b34019b8fbc27280ae75635cf2c Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Wed, 9 Mar 2022 13:33:26 -1000 Subject: [PATCH 034/127] Fix function and parameter names, and comment --- configure.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/configure.py b/configure.py index fcc987c2fe..510a70249e 100755 --- a/configure.py +++ b/configure.py @@ -305,15 +305,15 @@ def binary(name): else: n.variable('ar', configure_env.get('AR', 'ar')) -def SearchPath(exe_name): - """Find an executable (.exe, .bat, whatever) in the system path.""" +def search_system_path(file_name): + """Find a file in the system path.""" for dir in os.environ['path'].split(';'): - path = os.path.join(dir, exe_name) + path = os.path.join(dir, file_name) if os.path.exists(path): return path if platform.is_msvc(): - if not SearchPath('cl.exe'): + if not search_system_path('cl.exe'): raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS') cflags = ['/showIncludes', '/nologo', # Don't print startup banner. 
From ea04cf32c94131d65624b7185b0ce176a35861d1 Mon Sep 17 00:00:00 2001 From: Ken Matsui <26405363+ken-matsui@users.noreply.github.com> Date: Wed, 16 Mar 2022 02:34:27 +0900 Subject: [PATCH 035/127] Avoid shadowing `time_millis_`, the field of `StatusPrinter` --- src/status.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/status.h b/src/status.h index e211ba3487..b2e50ea1aa 100644 --- a/src/status.h +++ b/src/status.h @@ -92,14 +92,14 @@ struct StatusPrinter : Status { double rate() { return rate_; } - void UpdateRate(int update_hint, int64_t time_millis_) { + void UpdateRate(int update_hint, int64_t time_millis) { if (update_hint == last_update_) return; last_update_ = update_hint; if (times_.size() == N) times_.pop(); - times_.push(time_millis_); + times_.push(time_millis); if (times_.back() != times_.front()) rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3); } From 7a95b48be3c0d604936d24565fb91d7b2625885d Mon Sep 17 00:00:00 2001 From: Siyuan Ren Date: Sat, 7 May 2022 18:02:09 +0800 Subject: [PATCH 036/127] Really respect the env variable `CLICOLOR_FORCE`. Prior to this commit, ninja disables color output even if CLICOLOR_FORCE is set, if it fails to set VT processing on Windows terminal. This does not respect the user's wish. One use case of this is to force color on MSYS terminal. --- src/line_printer.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/line_printer.cc b/src/line_printer.cc index a3d0528606..f510f523e5 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -46,10 +46,6 @@ LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) { } #endif supports_color_ = smart_terminal_; - if (!supports_color_) { - const char* clicolor_force = getenv("CLICOLOR_FORCE"); - supports_color_ = clicolor_force && string(clicolor_force) != "0"; - } #ifdef _WIN32 // Try enabling ANSI escape sequence support on Windows 10 terminals. 
if (supports_color_) { @@ -61,6 +57,10 @@ LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) { } } #endif + if (!supports_color_) { + const char* clicolor_force = getenv("CLICOLOR_FORCE"); + supports_color_ = clicolor_force && string(clicolor_force) != "0"; + } } void LinePrinter::Print(string to_print, LineType type) { From 99c1bc7442ff3109c8b91fb98b4a252045623296 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Sun, 15 May 2022 17:52:42 +0200 Subject: [PATCH 037/127] doc: Add available since 1.11 to Validations --- doc/manual.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index 2062a2a210..214dca4a57 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -1046,6 +1046,9 @@ relative path, pointing to the same file, are considered different by Ninja. [[validations]] Validations ~~~~~~~~~~~ + +_Available since Ninja 1.11._ + Validations listed on the build line cause the specified files to be added to the top level of the build graph (as if they were specified on the Ninja command line) whenever the build line is a transitive From 29e66e24199113535195d06a228a7e0e0b229c94 Mon Sep 17 00:00:00 2001 From: Gregor Jasny Date: Tue, 17 May 2022 16:51:46 +0200 Subject: [PATCH 038/127] chore: fix build warning --- src/ninja.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ninja.cc b/src/ninja.cc index 2b71eb170f..834e2846ee 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -1400,7 +1400,7 @@ class DeferGuessParallelism { BuildConfig* config; DeferGuessParallelism(BuildConfig* config) - : config(config), needGuess(true) {} + : needGuess(true), config(config) {} void Refresh() { if (needGuess) { From 8e0af0809cd8e5403ad0410e6fd75c94934ca75d Mon Sep 17 00:00:00 2001 From: Ken Matsui <26405363+ken-matsui@users.noreply.github.com> Date: Tue, 31 May 2022 22:25:47 +0900 Subject: [PATCH 039/127] Support building `libninja-re2c.a` on `configure.py` --- configure.py | 16 
+++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/configure.py b/configure.py index 43904349a8..f24d1f8a61 100755 --- a/configure.py +++ b/configure.py @@ -489,16 +489,27 @@ def has_re2c(): "changes to src/*.in.cc will not affect your build.") n.newline() -n.comment('Core source files all build into ninja library.') cxxvariables = [] if platform.is_msvc(): cxxvariables = [('pdb', 'ninja.pdb')] + +n.comment('Generate a library for `ninja-re2c`.') +re2c_objs = [] +for name in ['depfile_parser', 'lexer']: + re2c_objs += cxx(name, variables=cxxvariables) +if platform.is_msvc(): + n.build(built('ninja-re2c.lib'), 'ar', re2c_objs) +else: + n.build(built('libninja-re2c.a'), 'ar', re2c_objs) +n.newline() + +n.comment('Core source files all build into ninja library.') +objs.extend(re2c_objs) for name in ['build', 'build_log', 'clean', 'clparser', 'debug_flags', - 'depfile_parser', 'deps_log', 'disk_interface', 'dyndep', @@ -508,7 +519,6 @@ def has_re2c(): 'graph', 'graphviz', 'json', - 'lexer', 'line_printer', 'manifest_parser', 'metrics', From 23e6bf5c41e17e20872e8c7b2a7cfb86a0db5e1a Mon Sep 17 00:00:00 2001 From: Orgad Shaneh Date: Tue, 7 Jun 2022 09:35:44 +0300 Subject: [PATCH 040/127] Flush output after each line The output is not flushed automatically after \n, at least on Windows 10. 
--- src/line_printer.cc | 1 + src/ninja.cc | 1 + 2 files changed, 2 insertions(+) diff --git a/src/line_printer.cc b/src/line_printer.cc index a3d0528606..fa97447a9d 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -118,6 +118,7 @@ void LinePrinter::Print(string to_print, LineType type) { have_blank_line_ = false; } else { printf("%s\n", to_print.c_str()); + fflush(stdout); } } diff --git a/src/ninja.cc b/src/ninja.cc index 834e2846ee..d1f86ea794 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -672,6 +672,7 @@ int NinjaMain::ToolRules(const Options* options, int argc, char* argv[]) { } } printf("\n"); + fflush(stdout); } return 0; } From 57b8fee639a4290176086f3839c78bfc0d02c42b Mon Sep 17 00:00:00 2001 From: Ken Matsui <26405363+ken-matsui@users.noreply.github.com> Date: Sat, 28 May 2022 22:26:55 +0900 Subject: [PATCH 041/127] Add an option to avoid building binary when used as a library --- CMakeLists.txt | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70fc5e99f0..22b815886c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,6 +3,8 @@ cmake_minimum_required(VERSION 3.15) include(CheckSymbolExists) include(CheckIPOSupported) +option(NINJA_BUILD_BINARY "Build ninja binary" ON) + project(ninja) # --- optional link-time optimization @@ -148,11 +150,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX") endif() # Main executable is library plus main() function. -add_executable(ninja src/ninja.cc) -target_link_libraries(ninja PRIVATE libninja libninja-re2c) +if(NINJA_BUILD_BINARY) + add_executable(ninja src/ninja.cc) + target_link_libraries(ninja PRIVATE libninja libninja-re2c) -if(WIN32) - target_sources(ninja PRIVATE windows/ninja.manifest) + if(WIN32) + target_sources(ninja PRIVATE windows/ninja.manifest) + endif() endif() # Adds browse mode into the ninja binary if it's supported by the host platform. 
@@ -171,8 +175,10 @@ if(platform_supports_ninja_browse) VERBATIM ) - target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE) - target_sources(ninja PRIVATE src/browse.cc) + if(NINJA_BUILD_BINARY) + target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE) + target_sources(ninja PRIVATE src/browse.cc) + endif() set_source_files_properties(src/browse.cc PROPERTIES OBJECT_DEPENDS "${PROJECT_BINARY_DIR}/build/browse_py.h" @@ -232,4 +238,6 @@ if(BUILD_TESTING) add_test(NAME NinjaTest COMMAND ninja_test) endif() -install(TARGETS ninja) +if(NINJA_BUILD_BINARY) + install(TARGETS ninja) +endif() From a2b5e6deff1545f5ca1947930fa59fa3ff236db7 Mon Sep 17 00:00:00 2001 From: John Drouhard Date: Fri, 26 Mar 2021 12:07:21 -0500 Subject: [PATCH 042/127] Introduce mechanism to provide resiliency for inputs changing while the build runs When an edge starts to run, create a temporary lock file in the build directory, stat it, and cache its mtime. When the command finishes, use the temporary lock file's mtime from when the edge started running as the mtime that is recorded in the build log for each of the edge's output(s). Subsequent runs will use that as the mtime for the output(s). This provides robustness against inputs changing while the command itself is running. If an input is changed, the subsequent run will detect the output as dirty since its recorded mtime reflects when the build command began, not when the output was actually written to disk. Generator and restat rules are exempt from this and will continue to record their actual mtime on disk at the time the command finished in the build log (unless the restat rule cleans the output). This avoids potential infinite loops when the generator rule touches input dependencies of the output(s) or a restat rule intentionally changes implicit dependencies of its output. 
--- src/build.cc | 88 +++++----- src/build.h | 1 + src/build_log.cc | 10 +- src/build_log.h | 2 +- src/build_test.cc | 320 ++++++++++++++++++++++++++++++++++++- src/disk_interface_test.cc | 2 +- src/graph.cc | 49 +++--- src/graph.h | 4 +- 8 files changed, 396 insertions(+), 80 deletions(-) diff --git a/src/build.cc b/src/build.cc index 6f11ed7a3c..76ff93af03 100644 --- a/src/build.cc +++ b/src/build.cc @@ -518,6 +518,10 @@ Builder::Builder(State* state, const BuildConfig& config, start_time_millis_(start_time_millis), disk_interface_(disk_interface), scan_(state, build_log, deps_log, disk_interface, &config_.depfile_parser_options) { + lock_file_path_ = ".ninja_lock"; + string build_dir = state_->bindings_.LookupVariable("builddir"); + if (!build_dir.empty()) + lock_file_path_ = build_dir + "/" + lock_file_path_; } Builder::~Builder() { @@ -552,6 +556,10 @@ void Builder::Cleanup() { disk_interface_->RemoveFile(depfile); } } + + string err; + if (disk_interface_->Stat(lock_file_path_, &err) > 0) + disk_interface_->RemoveFile(lock_file_path_); } Node* Builder::AddTarget(const string& name, string* err) { @@ -704,14 +712,25 @@ bool Builder::StartEdge(Edge* edge, string* err) { status_->BuildEdgeStarted(edge, start_time_millis); - // Create directories necessary for outputs. + TimeStamp build_start = -1; + + // Create directories necessary for outputs and remember the current + // filesystem mtime to record later // XXX: this will block; do we care? for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { if (!disk_interface_->MakeDirs((*o)->path())) return false; + if (build_start == -1) { + disk_interface_->WriteFile(lock_file_path_, ""); + build_start = disk_interface_->Stat(lock_file_path_, err); + if (build_start == -1) + build_start = 0; + } } + edge->command_start_time_ = build_start; + // Create response file, if needed // XXX: this may also block; do we care? 
string rspfile = edge->GetUnescapedRspfile(); @@ -770,55 +789,42 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { } // Restat the edge outputs - TimeStamp output_mtime = 0; - bool restat = edge->GetBindingBool("restat"); + TimeStamp record_mtime = 0; if (!config_.dry_run) { + const bool restat = edge->GetBindingBool("restat"); + const bool generator = edge->GetBindingBool("generator"); bool node_cleaned = false; - - for (vector::iterator o = edge->outputs_.begin(); - o != edge->outputs_.end(); ++o) { - TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err); - if (new_mtime == -1) - return false; - if (new_mtime > output_mtime) - output_mtime = new_mtime; - if ((*o)->mtime() == new_mtime && restat) { - // The rule command did not change the output. Propagate the clean - // state through the build graph. - // Note that this also applies to nonexistent outputs (mtime == 0). - if (!plan_.CleanNode(&scan_, *o, err)) + record_mtime = edge->command_start_time_; + + // restat and generator rules must restat the outputs after the build + // has finished. if record_mtime == 0, then there was an error while + // attempting to touch/stat the temp file when the edge started and + // we should fall back to recording the outputs' current mtime in the + // log. + if (record_mtime == 0 || restat || generator) { + for (vector::iterator o = edge->outputs_.begin(); + o != edge->outputs_.end(); ++o) { + TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err); + if (new_mtime == -1) return false; - node_cleaned = true; + if (new_mtime > record_mtime) + record_mtime = new_mtime; + if ((*o)->mtime() == new_mtime && restat) { + // The rule command did not change the output. Propagate the clean + // state through the build graph. + // Note that this also applies to nonexistent outputs (mtime == 0). 
+ if (!plan_.CleanNode(&scan_, *o, err)) + return false; + node_cleaned = true; + } } } - if (node_cleaned) { - TimeStamp restat_mtime = 0; - // If any output was cleaned, find the most recent mtime of any - // (existing) non-order-only input or the depfile. - for (vector::iterator i = edge->inputs_.begin(); - i != edge->inputs_.end() - edge->order_only_deps_; ++i) { - TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err); - if (input_mtime == -1) - return false; - if (input_mtime > restat_mtime) - restat_mtime = input_mtime; - } - - string depfile = edge->GetUnescapedDepfile(); - if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) { - TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err); - if (depfile_mtime == -1) - return false; - if (depfile_mtime > restat_mtime) - restat_mtime = depfile_mtime; - } + record_mtime = edge->command_start_time_; // The total number of edges in the plan may have changed as a result // of a restat. status_->PlanHasTotalEdges(plan_.command_edge_count()); - - output_mtime = restat_mtime; } } @@ -832,7 +838,7 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { if (scan_.build_log()) { if (!scan_.build_log()->RecordCommand(edge, start_time_millis, - end_time_millis, output_mtime)) { + end_time_millis, record_mtime)) { *err = string("Error writing to build log: ") + strerror(errno); return false; } diff --git a/src/build.h b/src/build.h index d697dfb89e..d727a8a480 100644 --- a/src/build.h +++ b/src/build.h @@ -234,6 +234,7 @@ struct Builder { /// Time the build started. 
int64_t start_time_millis_; + std::string lock_file_path_; DiskInterface* disk_interface_; DependencyScan scan_; diff --git a/src/build_log.cc b/src/build_log.cc index 4dcd6cee53..b35279d410 100644 --- a/src/build_log.cc +++ b/src/build_log.cc @@ -116,9 +116,9 @@ BuildLog::LogEntry::LogEntry(const string& output) : output(output) {} BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash, - int start_time, int end_time, TimeStamp restat_mtime) + int start_time, int end_time, TimeStamp mtime) : output(output), command_hash(command_hash), - start_time(start_time), end_time(end_time), mtime(restat_mtime) + start_time(start_time), end_time(end_time), mtime(mtime) {} BuildLog::BuildLog() @@ -303,7 +303,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) { *end = 0; int start_time = 0, end_time = 0; - TimeStamp restat_mtime = 0; + TimeStamp mtime = 0; start_time = atoi(start); start = end + 1; @@ -319,7 +319,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) { if (!end) continue; *end = 0; - restat_mtime = strtoll(start, NULL, 10); + mtime = strtoll(start, NULL, 10); start = end + 1; end = (char*)memchr(start, kFieldSeparator, line_end - start); @@ -343,7 +343,7 @@ LoadStatus BuildLog::Load(const string& path, string* err) { entry->start_time = start_time; entry->end_time = end_time; - entry->mtime = restat_mtime; + entry->mtime = mtime; if (log_version >= 5) { char c = *end; *end = '\0'; entry->command_hash = (uint64_t)strtoull(start, NULL, 16); diff --git a/src/build_log.h b/src/build_log.h index 88551e3217..dd72c4c772 100644 --- a/src/build_log.h +++ b/src/build_log.h @@ -73,7 +73,7 @@ struct BuildLog { explicit LogEntry(const std::string& output); LogEntry(const std::string& output, uint64_t command_hash, - int start_time, int end_time, TimeStamp restat_mtime); + int start_time, int end_time, TimeStamp mtime); }; /// Lookup a previously-run command by its output path. 
diff --git a/src/build_test.cc b/src/build_test.cc index 4ef62b2113..3908761057 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -611,6 +611,7 @@ bool FakeCommandRunner::StartCommand(Edge* edge) { fs_->WriteFile(edge->outputs_[0]->path(), content); } else if (edge->rule().name() == "touch-implicit-dep-out") { string dep = edge->GetBinding("test_dependency"); + fs_->Tick(); fs_->Create(dep, ""); fs_->Tick(); for (vector::iterator out = edge->outputs_.begin(); @@ -627,7 +628,12 @@ bool FakeCommandRunner::StartCommand(Edge* edge) { fs_->Create(dep, ""); } else if (edge->rule().name() == "generate-depfile") { string dep = edge->GetBinding("test_dependency"); + bool touch_dep = edge->GetBindingBool("touch_dependency"); string depfile = edge->GetUnescapedDepfile(); + if (touch_dep) { + fs_->Tick(); + fs_->Create(dep, ""); + } string contents; for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { @@ -635,6 +641,20 @@ bool FakeCommandRunner::StartCommand(Edge* edge) { fs_->Create((*out)->path(), ""); } fs_->Create(depfile, contents); + } else if (edge->rule().name() == "long-cc") { + string dep = edge->GetBinding("test_dependency"); + string depfile = edge->GetUnescapedDepfile(); + string contents; + for (vector::iterator out = edge->outputs_.begin(); + out != edge->outputs_.end(); ++out) { + fs_->Tick(); + fs_->Tick(); + fs_->Tick(); + fs_->Create((*out)->path(), ""); + contents += (*out)->path() + ": " + dep + "\n"; + } + if (!dep.empty() && !depfile.empty()) + fs_->Create(depfile, contents); } else { printf("unknown command\n"); return false; @@ -690,6 +710,18 @@ bool FakeCommandRunner::WaitForCommand(Result* result) { else result->status = ExitSuccess; + // This rule simulates an external process modifying files while the build command runs. + // See TestInputMtimeRaceCondition and TestInputMtimeRaceConditionWithDepFile. 
+ // Note: only the first and third time the rule is run per test is the file modified, so + // the test can verify that subsequent runs without the race have no work to do. + if (edge->rule().name() == "long-cc") { + string dep = edge->GetBinding("test_dependency"); + if (fs_->now_ == 4) + fs_->files_[dep].mtime = 3; + if (fs_->now_ == 10) + fs_->files_[dep].mtime = 9; + } + // Provide a way for test cases to verify when an edge finishes that // some other edge is still active. This is useful for test cases // covering behavior involving multiple active edges. @@ -1471,7 +1503,7 @@ TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate) { TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch-implicit-dep-out\n" -" command = touch $test_dependency ; sleep 1 ; touch $out\n" +" command = sleep 1 ; touch $test_dependency ; sleep 1 ; touch $out\n" " generator = 1\n" "build out.imp: touch-implicit-dep-out | inimp inimp2\n" " test_dependency = inimp\n")); @@ -1497,6 +1529,29 @@ TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) { EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_TRUE(builder_.AlreadyUpToDate()); EXPECT_FALSE(GetNode("out.imp")->dirty()); + + command_runner_.commands_ran_.clear(); + state_.Reset(); + builder_.Cleanup(); + builder_.plan_.Reset(); + + fs_.Tick(); + fs_.Create("inimp", ""); + + EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); + EXPECT_FALSE(builder_.AlreadyUpToDate()); + + EXPECT_TRUE(builder_.Build(&err)); + EXPECT_TRUE(builder_.AlreadyUpToDate()); + + command_runner_.commands_ran_.clear(); + state_.Reset(); + builder_.Cleanup(); + builder_.plan_.Reset(); + + EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); + EXPECT_TRUE(builder_.AlreadyUpToDate()); + EXPECT_FALSE(GetNode("out.imp")->dirty()); } TEST_F(BuildWithLogTest, NotInLogButOnDisk) { @@ -1800,6 +1855,52 @@ TEST_F(BuildWithLogTest, RestatMissingInput) { ASSERT_EQ(restat_mtime, log_entry->mtime); } 
+TEST_F(BuildWithLogTest, RestatInputChangesDueToRule) { + ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, +"rule generate-depfile\n" +" command = sleep 1 ; touch $touch_dependency; touch $out ; echo \"$out: $test_dependency\" > $depfile\n" +"build out1: generate-depfile || cat1\n" +" test_dependency = in2\n" +" touch_dependency = 1\n" +" restat = 1\n" +" depfile = out.d\n")); + + // Perform the first build. out1 is a restat rule, so its recorded mtime in the build + // log should be the time the command completes, not the time the command started. One + // of out1's discovered dependencies will have a newer mtime than when out1 started + // running, due to its command touching the dependency itself. + string err; + EXPECT_TRUE(builder_.AddTarget("out1", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder_.Build(&err)); + ASSERT_EQ("", err); + EXPECT_EQ(2u, command_runner_.commands_ran_.size()); + EXPECT_EQ(2u, builder_.plan_.command_edge_count()); + BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1"); + ASSERT_TRUE(NULL != log_entry); + ASSERT_EQ(2u, log_entry->mtime); + + command_runner_.commands_ran_.clear(); + state_.Reset(); + builder_.Cleanup(); + builder_.plan_.Reset(); + + fs_.Tick(); + fs_.Create("in1", ""); + + // Touching a dependency of an order-only dependency of out1 should not cause out1 to + // rebuild. 
If out1 were not a restat rule, then it would rebuild here because its + // recorded mtime would have been an earlier mtime than its most recent input's (in2) + // mtime + EXPECT_TRUE(builder_.AddTarget("out1", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(!state_.GetNode("out1", 0)->dirty()); + EXPECT_TRUE(builder_.Build(&err)); + ASSERT_EQ("", err); + EXPECT_EQ(1u, command_runner_.commands_ran_.size()); + EXPECT_EQ(1u, builder_.plan_.command_edge_count()); +} + TEST_F(BuildWithLogTest, GeneratedPlainDepfileMtime) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule generate-depfile\n" @@ -1904,10 +2005,11 @@ TEST_F(BuildTest, RspFileSuccess) EXPECT_TRUE(builder_.Build(&err)); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); - // The RSP files were created - ASSERT_EQ(files_created + 2, fs_.files_created_.size()); + // The RSP files and temp file to acquire output mtimes were created + ASSERT_EQ(files_created + 3, fs_.files_created_.size()); ASSERT_EQ(1u, fs_.files_created_.count("out 2.rsp")); ASSERT_EQ(1u, fs_.files_created_.count("out 3.rsp")); + ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock")); // The RSP files were removed ASSERT_EQ(files_removed + 2, fs_.files_removed_.size()); @@ -1941,9 +2043,10 @@ TEST_F(BuildTest, RspFileFailure) { ASSERT_EQ("subcommand failed", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); - // The RSP file was created - ASSERT_EQ(files_created + 1, fs_.files_created_.size()); + // The RSP file and temp file to acquire output mtimes were created + ASSERT_EQ(files_created + 2, fs_.files_created_.size()); ASSERT_EQ(1u, fs_.files_created_.count("out.rsp")); + ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock")); // The RSP file was NOT removed ASSERT_EQ(files_removed, fs_.files_removed_.size()); @@ -2522,6 +2625,210 @@ TEST_F(BuildWithDepsLogTest, DepsIgnoredInDryRun) { builder.command_runner_.release(); } +TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceCondition) { + string err; + const char* manifest = + "rule 
long-cc\n" + " command = long-cc\n" + "build out: long-cc in1\n" + " test_dependency = in1\n"; + + State state; + ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); + ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); + + BuildLog build_log; + ASSERT_TRUE(build_log.Load("build_log", &err)); + ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err)); + + DepsLog deps_log; + ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + + BuildLog::LogEntry* log_entry = NULL; + { + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + // Run the build, out gets built, dep file is created + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + // See that an entry in the logfile is created. the input_mtime is 1 since that was + // the mtime of in1 when the command was started + log_entry = build_log.LookupByOutput("out"); + ASSERT_TRUE(NULL != log_entry); + ASSERT_EQ(1u, log_entry->mtime); + + builder.command_runner_.release(); + } + + { + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + // Trigger the build again - "out" should rebuild despite having a newer mtime than + // "in1", since "in1" was touched during the build of out (simulated by changing its + // mtime in the test builder's WaitForCommand() which runs before FinishCommand() + command_runner_.commands_ran_.clear(); + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + // Check that the logfile entry is still correct + log_entry = build_log.LookupByOutput("out"); + 
ASSERT_TRUE(NULL != log_entry); + ASSERT_TRUE(fs_.files_["in1"].mtime < log_entry->mtime); + builder.command_runner_.release(); + } + + { + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + // And a subsequent run should not have any work to do + command_runner_.commands_ran_.clear(); + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.AlreadyUpToDate()); + + builder.command_runner_.release(); + } +} + +TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceConditionWithDepFile) { + string err; + const char* manifest = + "rule long-cc\n" + " command = long-cc\n" + "build out: long-cc\n" + " deps = gcc\n" + " depfile = out.d\n" + " test_dependency = header.h\n"; + + fs_.Create("header.h", ""); + + State state; + ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); + + BuildLog build_log; + ASSERT_TRUE(build_log.Load("build_log", &err)); + ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err)); + + DepsLog deps_log; + ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + + { + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + + // Run the build, out gets built, dep file is created + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + // See that an entry in the logfile is created. the mtime is 1 due to the command + // starting when the file system's mtime was 1. 
+ BuildLog::LogEntry* log_entry = build_log.LookupByOutput("out"); + ASSERT_TRUE(NULL != log_entry); + ASSERT_EQ(1u, log_entry->mtime); + + builder.command_runner_.release(); + } + + { + // Trigger the build again - "out" will rebuild since its newest input mtime (header.h) + // is newer than the recorded mtime of out in the build log + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + builder.command_runner_.release(); + } + + { + // Trigger the build again - "out" won't rebuild since the file wasn't updated during + // the previous build + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + ASSERT_TRUE(builder.AlreadyUpToDate()); + + builder.command_runner_.release(); + } + + // touch the header to trigger a rebuild + fs_.Create("header.h", ""); + ASSERT_EQ(fs_.now_, 7); + + { + // Rebuild. This time, long-cc will cause header.h to be updated while the build is + // in progress + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + builder.command_runner_.release(); + } + + { + // Rebuild. Because header.h is now in the deplog for out, it should be detectable as + // a change-while-in-progress and should cause a rebuild of out. 
+ Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.Build(&err)); + ASSERT_EQ(1u, command_runner_.commands_ran_.size()); + + builder.command_runner_.release(); + } + + { + // This time, the header.h file was not updated during the build, so the target should + // not be considered dirty. + Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); + builder.command_runner_.reset(&command_runner_); + command_runner_.commands_ran_.clear(); + + state.Reset(); + EXPECT_TRUE(builder.AddTarget("out", &err)); + ASSERT_EQ("", err); + EXPECT_TRUE(builder.AlreadyUpToDate()); + + builder.command_runner_.release(); + } +} + /// Check that a restat rule generating a header cancels compilations correctly. TEST_F(BuildTest, RestatDepfileDependency) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, @@ -3042,9 +3349,10 @@ TEST_F(BuildTest, DyndepBuild) { ASSERT_EQ(2u, fs_.files_read_.size()); EXPECT_EQ("dd-in", fs_.files_read_[0]); EXPECT_EQ("dd", fs_.files_read_[1]); - ASSERT_EQ(2u + files_created, fs_.files_created_.size()); + ASSERT_EQ(3u + files_created, fs_.files_created_.size()); EXPECT_EQ(1u, fs_.files_created_.count("dd")); EXPECT_EQ(1u, fs_.files_created_.count("out")); + EXPECT_EQ(1u, fs_.files_created_.count(".ninja_lock")); } TEST_F(BuildTest, DyndepBuildSyntaxError) { diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc index 5e952edde5..7041d98400 100644 --- a/src/disk_interface_test.cc +++ b/src/disk_interface_test.cc @@ -198,7 +198,7 @@ TEST_F(DiskInterfaceTest, MakeDirs) { EXPECT_EQ(0, fclose(f)); #ifdef _WIN32 string path2 = "another\\with\\back\\\\slashes\\"; - EXPECT_TRUE(disk_.MakeDirs(path2.c_str())); + EXPECT_TRUE(disk_.MakeDirs(path2)); FILE* f2 = fopen((path2 + "a_file").c_str(), "w"); EXPECT_TRUE(f2); 
EXPECT_EQ(0, fclose(f2)); diff --git a/src/graph.cc b/src/graph.cc index 43ba45ae3d..041199a37f 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -298,37 +298,34 @@ bool DependencyScan::RecomputeOutputDirty(const Edge* edge, return false; } - BuildLog::LogEntry* entry = 0; - // Dirty if we're missing the output. if (!output->exists()) { EXPLAIN("output %s doesn't exist", output->path().c_str()); return true; } - // Dirty if the output is older than the input. - if (most_recent_input && output->mtime() < most_recent_input->mtime()) { - TimeStamp output_mtime = output->mtime(); - - // If this is a restat rule, we may have cleaned the output with a restat - // rule in a previous run and stored the most recent input mtime in the - // build log. Use that mtime instead, so that the file will only be - // considered dirty if an input was modified since the previous run. - bool used_restat = false; - if (edge->GetBindingBool("restat") && build_log() && - (entry = build_log()->LookupByOutput(output->path()))) { - output_mtime = entry->mtime; - used_restat = true; - } + BuildLog::LogEntry* entry = 0; - if (output_mtime < most_recent_input->mtime()) { - EXPLAIN("%soutput %s older than most recent input %s " - "(%" PRId64 " vs %" PRId64 ")", - used_restat ? "restat of " : "", output->path().c_str(), - most_recent_input->path().c_str(), - output_mtime, most_recent_input->mtime()); - return true; - } + // If this is a restat rule, we may have cleaned the output in a + // previous run and stored the command start time in the build log. 
+ // We don't want to consider a restat rule's outputs as dirty unless + // an input changed since the last run, so we'll skip checking the + // output file's actual mtime and simply check the recorded mtime from + // the log against the most recent input's mtime (see below) + bool used_restat = false; + if (edge->GetBindingBool("restat") && build_log() && + (entry = build_log()->LookupByOutput(output->path()))) { + used_restat = true; + } + + // Dirty if the output is older than the input. + if (!used_restat && most_recent_input && output->mtime() < most_recent_input->mtime()) { + EXPLAIN("output %s older than most recent input %s " + "(%" PRId64 " vs %" PRId64 ")", + output->path().c_str(), + most_recent_input->path().c_str(), + output->mtime(), most_recent_input->mtime()); + return true; } if (build_log()) { @@ -346,7 +343,9 @@ bool DependencyScan::RecomputeOutputDirty(const Edge* edge, // May also be dirty due to the mtime in the log being older than the // mtime of the most recent input. This can occur even when the mtime // on disk is newer if a previous run wrote to the output file but - // exited with an error or was interrupted. + // exited with an error or was interrupted. If this was a restat rule, + // then we only check the recorded mtime against the most recent input + // mtime and ignore the actual output's mtime above. 
EXPLAIN("recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")", output->path().c_str(), most_recent_input->path().c_str(), entry->mtime, most_recent_input->mtime()); diff --git a/src/graph.h b/src/graph.h index 9de67d2718..d07a9b7639 100644 --- a/src/graph.h +++ b/src/graph.h @@ -172,7 +172,8 @@ struct Edge { : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), id_(0), outputs_ready_(false), deps_loaded_(false), deps_missing_(false), generated_by_dep_loader_(false), - implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {} + command_start_time_(0), implicit_deps_(0), order_only_deps_(0), + implicit_outs_(0) {} /// Return true if all inputs' in-edges are ready. bool AllInputsReady() const; @@ -211,6 +212,7 @@ struct Edge { bool deps_loaded_; bool deps_missing_; bool generated_by_dep_loader_; + TimeStamp command_start_time_; const Rule& rule() const { return *rule_; } Pool* pool() const { return pool_; } From 996dd01ba6569d17888fd29f4bfd2a2d751ee467 Mon Sep 17 00:00:00 2001 From: Siyuan Ren Date: Sat, 11 Jun 2022 11:41:56 +0800 Subject: [PATCH 043/127] Qualify `std::string`. 
--- src/line_printer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/line_printer.cc b/src/line_printer.cc index f510f523e5..54ba883c24 100644 --- a/src/line_printer.cc +++ b/src/line_printer.cc @@ -59,7 +59,7 @@ LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) { #endif if (!supports_color_) { const char* clicolor_force = getenv("CLICOLOR_FORCE"); - supports_color_ = clicolor_force && string(clicolor_force) != "0"; + supports_color_ = clicolor_force && std::string(clicolor_force) != "0"; } } From c136c2e1df1d8e3340f08b7d33aef7a4c835349c Mon Sep 17 00:00:00 2001 From: Eisuke Kawashima Date: Thu, 12 May 2022 06:05:21 +0900 Subject: [PATCH 044/127] improve zsh-completion - add `ninja` prefix to functions - improve completion of `-d` and `-t` - stop completion if `-h`, `--help`, or `--version` is supplied - add missing `--verbose` options --- misc/zsh-completion | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/misc/zsh-completion b/misc/zsh-completion index 4cee3b8631..d42dac220c 100644 --- a/misc/zsh-completion +++ b/misc/zsh-completion @@ -16,7 +16,7 @@ # Add the following to your .zshrc to tab-complete ninja targets # fpath=(path/to/ninja/misc/zsh-completion $fpath) -__get_targets() { +(( $+functions[_ninja-get-targets] )) || _ninja-get-targets() { dir="." 
if [ -n "${opt_args[-C]}" ]; then @@ -31,42 +31,44 @@ __get_targets() { eval ${targets_command} 2>/dev/null | cut -d: -f1 } -__get_tools() { - ninja -t list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 +(( $+functions[_ninja-get-tools] )) || _ninja-get-tools() { + # remove the first line; remove the leading spaces; replace spaces with colon + ninja -t list 2> /dev/null | sed -e '1d;s/^ *//;s/ \+/:/' } -__get_modes() { - ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d' +(( $+functions[_ninja-get-modes] )) || _ninja-get-modes() { + # remove the first line; remove the last line; remove the leading spaces; replace spaces with colon + ninja -d list 2> /dev/null | sed -e '1d;$d;s/^ *//;s/ \+/:/' } -__modes() { +(( $+functions[_ninja-modes] )) || _ninja-modes() { local -a modes - modes=(${(fo)"$(__get_modes)"}) + modes=(${(fo)"$(_ninja-get-modes)"}) _describe 'modes' modes } -__tools() { +(( $+functions[_ninja-tools] )) || _ninja-tools() { local -a tools - tools=(${(fo)"$(__get_tools)"}) + tools=(${(fo)"$(_ninja-get-tools)"}) _describe 'tools' tools } -__targets() { +(( $+functions[_ninja-targets] )) || _ninja-targets() { local -a targets - targets=(${(fo)"$(__get_targets)"}) + targets=(${(fo)"$(_ninja-get-targets)"}) _describe 'targets' targets } _arguments \ - {-h,--help}'[Show help]' \ - '--version[Print ninja version]' \ + '(- *)'{-h,--help}'[Show help]' \ + '(- *)--version[Print ninja version]' \ '-C+[Change to directory before doing anything else]:directories:_directories' \ '-f+[Specify input build file (default=build.ninja)]:files:_files' \ '-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \ '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \ '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \ '-n[Dry run (do not run commands but act like they succeeded)]' \ - '-v[Show all command lines while building]' \ - '-d+[Enable debugging (use 
-d list to list modes)]:modes:__modes' \ - '-t+[Run a subtool (use -t list to list subtools)]:tools:__tools' \ - '*::targets:__targets' + '(-v --verbose)'{-v,--verbose}'[Show all command lines while building]' \ + '-d+[Enable debugging (use -d list to list modes)]:modes:_ninja-modes' \ + '-t+[Run a subtool (use -t list to list subtools)]:tools:_ninja-tools' \ + '*::targets:_ninja-targets' From a4c24a33c1ed32d9d51c8df763ec6ad574587d02 Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Thu, 16 Jun 2022 00:47:21 -0700 Subject: [PATCH 045/127] Build ninja with C++11 (#2089) * Build ninja with C++11 In order to allow future use of std::chrono to make the stats code portable it is desirable to compile with C++11. Doing so also allows use of std::unordered_map, and reduces the number of #ifdefs in the ninja source code. Switching to C++11 requires modifying both CMakeLists.txt and configure.py, for MSVC and for other build systems. For MSVC the required change is adding /Zc:__cplusplus to tell the compiler to give a more accurate value for the __cplusplus macro. For other platforms the change is to add -std=c++11 or the CMake equivalent. This change makes some progress towards resolving issue #2004. * Delete code and instructions C++11 guarantees that string::data() gives null-terminated pointers, so explicitly adding a null terminator is no longer needed. The Google C++ Style Guide already recommends avoiding unnecessary use of C++14 and C++17 so repeating this in CONTRIBUTING.md is not critical. These changes both came from PR-review suggestions. * Only set cxx_std_11 if standard is 98 * Return to unconditional target_compile_features use After much discussion it sounds like using target_compile_features unconditionally is best. 
--- CMakeLists.txt | 4 ++++ CONTRIBUTING.md | 3 --- configure.py | 4 ++++ src/graph.cc | 4 ---- src/hash_map.h | 44 -------------------------------------------- src/missing_deps.h | 7 ------- src/parser.cc | 7 ------- 7 files changed, 8 insertions(+), 65 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 22b815886c..9c0f27a93e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,6 +21,8 @@ endif() if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) + # Note that these settings are separately specified in configure.py, and + # these lists should be kept in sync. add_compile_options(/W4 /wd4100 /wd4267 /wd4706 /wd4702 /wd4244 /GR- /Zc:__cplusplus) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) else() @@ -138,6 +140,8 @@ else() endif() endif() +target_compile_features(libninja PUBLIC cxx_std_11) + #Fixes GetActiveProcessorCount on MinGW if(MINGW) target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 __USE_MINGW_ANSI_STDIO=1) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index be1fc02779..c6c190c058 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,6 @@ Generally it's the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with a few additions: -* Any code merged into the Ninja codebase which will be part of the main - executable must compile as C++03. You may use C++11 features in a test or an - unimportant tool if you guard your code with `#if __cplusplus >= 201103L`. * We have used `using namespace std;` a lot in the past. For new contributions, please try to avoid relying on it and instead whenever possible use `std::`. 
However, please do not change existing code simply to add `std::` unless your diff --git a/configure.py b/configure.py index 43904349a8..99a2c86e35 100755 --- a/configure.py +++ b/configure.py @@ -305,6 +305,8 @@ def binary(name): else: n.variable('ar', configure_env.get('AR', 'ar')) +# Note that build settings are separately specified in CMakeLists.txt and +# these lists should be kept in sync. if platform.is_msvc(): cflags = ['/showIncludes', '/nologo', # Don't print startup banner. @@ -320,6 +322,7 @@ def binary(name): # Disable warnings about ignored typedef in DbgHelp.h '/wd4091', '/GR-', # Disable RTTI. + '/Zc:__cplusplus', # Disable size_t -> int truncation warning. # We never have strings or arrays larger than 2**31. '/wd4267', @@ -339,6 +342,7 @@ def binary(name): '-Wno-unused-parameter', '-fno-rtti', '-fno-exceptions', + '-std=c++11', '-fvisibility=hidden', '-pipe', '-DNINJA_PYTHON="%s"' % options.with_python] if options.debug: diff --git a/src/graph.cc b/src/graph.cc index 041199a37f..95fc1dc1b5 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -402,11 +402,7 @@ string EdgeEnv::LookupVariable(const string& var) { if (var == "in" || var == "in_newline") { int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ - edge_->order_only_deps_; -#if __cplusplus >= 201103L return MakePathList(edge_->inputs_.data(), explicit_deps_count, -#else - return MakePathList(&edge_->inputs_[0], explicit_deps_count, -#endif var == "in" ? 
' ' : '\n'); } else if (var == "out") { int explicit_outs_count = edge_->outputs_.size() - edge_->implicit_outs_; diff --git a/src/hash_map.h b/src/hash_map.h index 55d2c9d46d..435360984d 100644 --- a/src/hash_map.h +++ b/src/hash_map.h @@ -53,7 +53,6 @@ unsigned int MurmurHash2(const void* key, size_t len) { return h; } -#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) #include namespace std { @@ -68,56 +67,13 @@ struct hash { }; } -#elif defined(_MSC_VER) -#include - -using stdext::hash_map; -using stdext::hash_compare; - -struct StringPieceCmp : public hash_compare { - size_t operator()(const StringPiece& key) const { - return MurmurHash2(key.str_, key.len_); - } - bool operator()(const StringPiece& a, const StringPiece& b) const { - int cmp = memcmp(a.str_, b.str_, min(a.len_, b.len_)); - if (cmp < 0) { - return true; - } else if (cmp > 0) { - return false; - } else { - return a.len_ < b.len_; - } - } -}; - -#else -#include - -using __gnu_cxx::hash_map; - -namespace __gnu_cxx { -template<> -struct hash { - size_t operator()(StringPiece key) const { - return MurmurHash2(key.str_, key.len_); - } -}; -} -#endif - /// A template for hash_maps keyed by a StringPiece whose string is /// owned externally (typically by the values). Use like: /// ExternalStringHash::Type foos; to make foos into a hash /// mapping StringPiece => Foo*. 
template struct ExternalStringHashMap { -#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) typedef std::unordered_map Type; -#elif defined(_MSC_VER) - typedef hash_map Type; -#else - typedef hash_map Type; -#endif }; #endif // NINJA_MAP_H_ diff --git a/src/missing_deps.h b/src/missing_deps.h index ae5707424c..7a615da2a5 100644 --- a/src/missing_deps.h +++ b/src/missing_deps.h @@ -19,9 +19,7 @@ #include #include -#if __cplusplus >= 201103L #include -#endif struct DepsLog; struct DiskInterface; @@ -68,13 +66,8 @@ struct MissingDependencyScanner { int missing_dep_path_count_; private: -#if __cplusplus >= 201103L using InnerAdjacencyMap = std::unordered_map; using AdjacencyMap = std::unordered_map; -#else - typedef std::map InnerAdjacencyMap; - typedef std::map AdjacencyMap; -#endif AdjacencyMap adjacency_map_; }; diff --git a/src/parser.cc b/src/parser.cc index 756922de11..5f303c557c 100644 --- a/src/parser.cc +++ b/src/parser.cc @@ -31,13 +31,6 @@ bool Parser::Load(const string& filename, string* err, Lexer* parent) { return false; } - // The lexer needs a nul byte at the end of its input, to know when it's done. - // It takes a StringPiece, and StringPiece's string constructor uses - // string::data(). data()'s return value isn't guaranteed to be - // null-terminated (although in practice - libc++, libstdc++, msvc's stl -- - // it is, and C++11 demands that too), so add an explicit nul byte. 
- contents.resize(contents.size() + 1); - return Parse(filename, contents, err); } From d4017a2b1ea642f12dabe05ec99b2a16c93e99aa Mon Sep 17 00:00:00 2001 From: "Igor [hyperxor]" <56217938+hyperxor@users.noreply.github.com> Date: Mon, 20 Jun 2022 09:45:29 +0300 Subject: [PATCH 046/127] Make IsDepsEntryLiveFor static and add const to parameter (#2141) --- src/deps_log.cc | 2 +- src/deps_log.h | 2 +- src/ninja.cc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/deps_log.cc b/src/deps_log.cc index 7e48b38513..e32a7a98ad 100644 --- a/src/deps_log.cc +++ b/src/deps_log.cc @@ -361,7 +361,7 @@ bool DepsLog::Recompact(const string& path, string* err) { return true; } -bool DepsLog::IsDepsEntryLiveFor(Node* node) { +bool DepsLog::IsDepsEntryLiveFor(const Node* node) { // Skip entries that don't have in-edges or whose edges don't have a // "deps" attribute. They were in the deps log from previous builds, but // the the files they were for were removed from the build and their deps diff --git a/src/deps_log.h b/src/deps_log.h index 09cc41c019..2a1b188906 100644 --- a/src/deps_log.h +++ b/src/deps_log.h @@ -97,7 +97,7 @@ struct DepsLog { /// past but are no longer part of the manifest. This function returns if /// this is the case for a given node. This function is slow, don't call /// it from code that runs on every build. - bool IsDepsEntryLiveFor(Node* node); + static bool IsDepsEntryLiveFor(const Node* node); /// Used for tests. 
const std::vector& nodes() const { return nodes_; } diff --git a/src/ninja.cc b/src/ninja.cc index 834e2846ee..9ae53deb7d 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -532,7 +532,7 @@ int NinjaMain::ToolDeps(const Options* options, int argc, char** argv) { if (argc == 0) { for (vector::const_iterator ni = deps_log_.nodes().begin(); ni != deps_log_.nodes().end(); ++ni) { - if (deps_log_.IsDepsEntryLiveFor(*ni)) + if (DepsLog::IsDepsEntryLiveFor(*ni)) nodes.push_back(*ni); } } else { From c47ff5aa33d0928f20d06986f8331e3bb5eba3bc Mon Sep 17 00:00:00 2001 From: Hans Wennborg Date: Mon, 20 Jun 2022 16:18:36 +0200 Subject: [PATCH 047/127] Handle ERROR_DIRECTORY when calling FindFirstFileExA If a directory referenced by .ninja_deps has changed to a regular file since the last build, FindFirstFileExA will return ERROR_DIRECTORY. Fixes #2159 --- src/disk_interface.cc | 3 ++- src/disk_interface_test.cc | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/disk_interface.cc b/src/disk_interface.cc index e73d901c11..fa716eda78 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -110,7 +110,8 @@ bool StatAllFilesInDir(const string& dir, map* stamps, if (find_handle == INVALID_HANDLE_VALUE) { DWORD win_err = GetLastError(); - if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) + if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND || + win_err == ERROR_DIRECTORY) return true; *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString(); return false; diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc index 7041d98400..294df72ea7 100644 --- a/src/disk_interface_test.cc +++ b/src/disk_interface_test.cc @@ -65,6 +65,17 @@ TEST_F(DiskInterfaceTest, StatMissingFile) { EXPECT_EQ("", err); } +TEST_F(DiskInterfaceTest, StatMissingFileWithCache) { + disk_.AllowStatCache(true); + string err; + + // On Windows, the errno for FindFirstFileExA, which is used when the stat + // cache is 
enabled, is different when the directory name is not a directory. + ASSERT_TRUE(Touch("notadir")); + EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err)); + EXPECT_EQ("", err); +} + TEST_F(DiskInterfaceTest, StatBadPath) { string err; #ifdef _WIN32 From 87e50b0edde683e02673abc0de5fe162ff841349 Mon Sep 17 00:00:00 2001 From: Gergely Nagy Date: Sat, 23 Jul 2022 20:43:34 +0200 Subject: [PATCH 048/127] Fix building on Windows in UNICODE mode --- src/disk_interface.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/disk_interface.cc b/src/disk_interface.cc index e73d901c11..e64bb43f32 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -267,7 +267,7 @@ FileReader::Status RealDiskInterface::ReadFile(const string& path, int RealDiskInterface::RemoveFile(const string& path) { #ifdef _WIN32 - DWORD attributes = GetFileAttributes(path.c_str()); + DWORD attributes = GetFileAttributesA(path.c_str()); if (attributes == INVALID_FILE_ATTRIBUTES) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { @@ -278,7 +278,7 @@ int RealDiskInterface::RemoveFile(const string& path) { // On Windows Ninja should behave the same: // https://github.com/ninja-build/ninja/issues/1886 // Skip error checking. If this fails, accept whatever happens below. - SetFileAttributes(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY); + SetFileAttributesA(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY); } if (attributes & FILE_ATTRIBUTE_DIRECTORY) { // remove() deletes both files and directories. 
On Windows we have to @@ -286,7 +286,7 @@ int RealDiskInterface::RemoveFile(const string& path) { // used on a directory) // This fixes the behavior of ninja -t clean in some cases // https://github.com/ninja-build/ninja/issues/828 - if (!RemoveDirectory(path.c_str())) { + if (!RemoveDirectoryA(path.c_str())) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { return 1; @@ -296,7 +296,7 @@ int RealDiskInterface::RemoveFile(const string& path) { return -1; } } else { - if (!DeleteFile(path.c_str())) { + if (!DeleteFileA(path.c_str())) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { return 1; From 93403e1d66d3a8a83506f808ba673cf1fa3ba9e6 Mon Sep 17 00:00:00 2001 From: Yang Zongze Date: Thu, 28 Jul 2022 14:35:24 +0800 Subject: [PATCH 049/127] Fixbug: SIGFPE error when cpu.cfs_period_us = 0 This will fix the issue (#2173). --- src/util.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/util.cc b/src/util.cc index 483f4a6250..ef5f103305 100644 --- a/src/util.cc +++ b/src/util.cc @@ -647,6 +647,8 @@ int ParseCPUFromCGroup() { readCount(cpu->second + "/cpu.cfs_period_us"); if (!period.second) return -1; + if (period.first == 0) + return -1; return quota.first / period.first; } #endif From 29fe3ef1faefa554bc9490716071e969e84774db Mon Sep 17 00:00:00 2001 From: Peter Bell Date: Wed, 10 Aug 2022 17:29:50 +0100 Subject: [PATCH 050/127] Simplify scheduler to not use build log/execution time --- src/build.cc | 123 ++++++++-------------------------------------- src/build.h | 6 +-- src/build_test.cc | 108 +++------------------------------------- src/graph.h | 45 +++++++---------- src/graph_test.cc | 4 +- 5 files changed, 51 insertions(+), 235 deletions(-) diff --git a/src/build.cc b/src/build.cc index 9ace8c909e..c3dc558704 100644 --- a/src/build.cc +++ b/src/build.cc @@ -450,89 +450,16 @@ struct SeenBefore { } }; -// Assign run_time_ms for all wanted edges, 
and returns total time for all edges -// For phony edges, 0 cost. -// For edges with a build history, use the last build time. -// For edges without history, use the 75th percentile time for edges with history. -// Or, if there is no history at all just use 1 -int64_t AssignEdgeRuntime(BuildLog* build_log, - const std::map& want) { - bool missing_durations = false; - std::vector durations; - int64_t total_time = 0; - const int64_t kUnknownRunTime = -1; // marker value for the two loops below. - - for (std::map::const_iterator it = want.begin(), - end = want.end(); - it != end; ++it) { - Edge* edge = it->first; - if (edge->is_phony()) { - continue; - } - BuildLog::LogEntry* entry = - build_log->LookupByOutput(edge->outputs_[0]->path()); - if (!entry) { - missing_durations = true; - edge->set_run_time_ms(kUnknownRunTime); // mark as needing filled in - continue; - } - const int64_t duration = entry->end_time - entry->start_time; - edge->set_run_time_ms(duration); - total_time += duration; - durations.push_back(duration); - } - - if (!missing_durations) { - return total_time; - } - - // Heuristic: for unknown edges, take the 75th percentile time. - // This allows the known-slowest jobs to run first, but isn't so - // small that it is always the lowest priority. Which for slow jobs, - // might bottleneck the build. 
- int64_t p75_time = 1; - int64_t num_durations = static_cast(durations.size()); - if (num_durations > 0) { - size_t p75_idx = (num_durations - 1) - num_durations / 4; - std::vector::iterator p75_it = durations.begin() + p75_idx; - std::nth_element(durations.begin(), p75_it, durations.end()); - p75_time = *p75_it; - } - - for (std::map::const_iterator it = want.begin(), - end = want.end(); - it != end; ++it) { - Edge* edge = it->first; - if (edge->run_time_ms() != kUnknownRunTime) { - continue; - } - edge->set_run_time_ms(p75_time); - total_time += p75_time; - } - return total_time; -} - -int64_t AssignDefaultEdgeRuntime(std::map &want) { - int64_t total_time = 0; - - for (std::map::const_iterator it = want.begin(), - end = want.end(); - it != end; ++it) { - Edge* edge = it->first; - if (edge->is_phony()) { - continue; - } - - edge->set_run_time_ms(1); - ++total_time; - } - return total_time; +// Heuristic for edge priority weighting. +// Phony edges are free (0 cost), all other edges are weighted equally. +int64_t EdgeWeightHeuristic(Edge *edge) { + return edge->is_phony() ? 0 : 1; } } // namespace -void Plan::ComputeCriticalTime(BuildLog* build_log) { - METRIC_RECORD("ComputeCriticalTime"); +void Plan::ComputeCriticalPath() { + METRIC_RECORD("ComputeCriticalPath"); // Remove duplicate targets { std::set seen; @@ -541,16 +468,8 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { targets_.end()); } - // total time if building all edges in serial. This value is big - // enough to ensure higher priority target's initial critical time - // is always bigger than lower ones - const int64_t total_time = build_log ? - AssignEdgeRuntime(build_log, want_) : - AssignDefaultEdgeRuntime(want_); // Plan tests have no build_log - - - // Use backflow algorithm to compute critical times for all nodes, starting - // from the destination nodes. + // Use backflow algorithm to compute the critical path for all + // nodes, starting from the destination nodes. 
// XXX: ignores pools std::queue work_queue; // Queue, for breadth-first traversal // The set of edges currently in work_queue, to avoid duplicates. @@ -560,12 +479,9 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { for (size_t i = 0; i < targets_.size(); ++i) { const Node* target = targets_[i]; if (Edge* in = target->in_edge()) { - // Add a bias to ensure that targets that appear first in |targets_| have a larger critical time than - // those that follow them. E.g. for 3 targets: [2*total_time, total_time, 0]. - int64_t priority_weight = (targets_.size() - i - 1) * total_time; - in->set_critical_time_ms( - priority_weight + - std::max(in->run_time_ms(), in->critical_time_ms())); + int64_t edge_weight = EdgeWeightHeuristic(in); + in->set_critical_path_weight( + std::max(edge_weight, in->critical_path_weight())); if (!seen_edge(in)) { work_queue.push(in); } @@ -575,7 +491,7 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { while (!work_queue.empty()) { Edge* e = work_queue.front(); work_queue.pop(); - // If the critical time of any dependent edges is updated, this + // If the critical path of any dependent edges is updated, this // edge may need to be processed again. So re-allow insertion. 
active_edges.erase(e); @@ -586,10 +502,11 @@ void Plan::ComputeCriticalTime(BuildLog* build_log) { if (!in) { continue; } - // Only process edge if this node offers a higher critical time - const int64_t proposed_time = e->critical_time_ms() + in->run_time_ms(); - if (proposed_time > in->critical_time_ms()) { - in->set_critical_time_ms(proposed_time); + // Only process edge if this node offers a higher weighted path + const int64_t edge_weight = EdgeWeightHeuristic(in); + const int64_t proposed_weight = e->critical_path_weight() + edge_weight; + if (proposed_weight > in->critical_path_weight()) { + in->set_critical_path_weight(proposed_weight); if (!seen_edge(in)) { work_queue.push(in); } @@ -629,8 +546,8 @@ void Plan::ScheduleInitialEdges() { } } -void Plan::PrepareQueue(BuildLog* build_log) { - ComputeCriticalTime(build_log); +void Plan::PrepareQueue() { + ComputeCriticalPath(); ScheduleInitialEdges(); } @@ -803,7 +720,7 @@ bool Builder::AlreadyUpToDate() const { bool Builder::Build(string* err) { assert(!AlreadyUpToDate()); - plan_.PrepareQueue(scan_.build_log()); + plan_.PrepareQueue(); status_->PlanHasTotalEdges(plan_.command_edge_count()); int pending_commands = 0; diff --git a/src/build.h b/src/build.h index e8b7c39880..63d06826b2 100644 --- a/src/build.h +++ b/src/build.h @@ -76,7 +76,7 @@ struct Plan { void Reset(); // After all targets have been added, prepares the ready queue for find work. - void PrepareQueue(BuildLog* build_log); + void PrepareQueue(); /// Update the build plan to account for modifications made to the graph /// by information loaded from a dyndep file. 
@@ -97,14 +97,14 @@ struct Plan { }; private: - void ComputeCriticalTime(BuildLog* build_log); + void ComputeCriticalPath(); bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err); void UnmarkDependents(const Node* node, std::set* dependents); bool AddSubTarget(const Node* node, const Node* dependent, std::string* err, std::set* dyndep_walk); // Add edges that kWantToStart into the ready queue - // Must be called after ComputeCriticalTime and before FindWork + // Must be called after ComputeCriticalPath and before FindWork void ScheduleInitialEdges(); /// Update plan with knowledge that the given node is up to date. diff --git a/src/build_test.cc b/src/build_test.cc index 4c274e64cd..176f1a3f03 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -54,7 +54,7 @@ struct PlanTest : public StateTestWithBuiltinRules { string err; EXPECT_TRUE(plan_.AddTarget(GetNode(node), &err)); ASSERT_EQ("", err); - plan_.PrepareQueue(log); + plan_.PrepareQueue(); ASSERT_TRUE(plan_.more_to_do()); } @@ -207,7 +207,7 @@ void PlanTest::TestPoolWithDepthOne(const char* test_case) { ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); - plan_.PrepareQueue(NULL); + plan_.PrepareQueue(); ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); @@ -434,7 +434,7 @@ TEST_F(PlanTest, PoolWithFailingEdge) { ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); - plan_.PrepareQueue(NULL); + plan_.PrepareQueue(); ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); @@ -491,11 +491,11 @@ TEST_F(PlanTest, PriorityWithoutBuildLog) { BuildLog log; PrepareForTarget("out", &log); - EXPECT_EQ(GetNode("out")->in_edge()->critical_time_ms(), 1); - EXPECT_EQ(GetNode("a0")->in_edge()->critical_time_ms(), 2); - EXPECT_EQ(GetNode("b0")->in_edge()->critical_time_ms(), 2); - EXPECT_EQ(GetNode("c0")->in_edge()->critical_time_ms(), 2); - 
EXPECT_EQ(GetNode("a1")->in_edge()->critical_time_ms(), 3); + EXPECT_EQ(GetNode("out")->in_edge()->critical_path_weight(), 1); + EXPECT_EQ(GetNode("a0")->in_edge()->critical_path_weight(), 2); + EXPECT_EQ(GetNode("b0")->in_edge()->critical_path_weight(), 2); + EXPECT_EQ(GetNode("c0")->in_edge()->critical_path_weight(), 2); + EXPECT_EQ(GetNode("a1")->in_edge()->critical_path_weight(), 3); const int n_edges = 5; const char *expected_order[n_edges] = { @@ -513,98 +513,6 @@ TEST_F(PlanTest, PriorityWithoutBuildLog) { EXPECT_FALSE(plan_.FindWork()); } -TEST_F(PlanTest, PriorityWithBuildLog) { - // With a build log, the critical time is longest weighted path. - // Test with the following graph: - // a2 - // | - // a1 b1 - // | | | - // a0 b0 c0 - // \ | / - // out - - ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, - "rule r\n" - " command = unused\n" - "build out: r a0 b0 c0\n" - "build a0: r a1\n" - "build a1: r a2\n" - "build b0: r b1\n" - "build c0: r b1\n" - )); - GetNode("a1")->MarkDirty(); - GetNode("a0")->MarkDirty(); - GetNode("b0")->MarkDirty(); - GetNode("c0")->MarkDirty(); - GetNode("out")->MarkDirty(); - - BuildLog log; - log.RecordCommand(GetNode("out")->in_edge(), 0, 100); // time = 100 - log.RecordCommand(GetNode("a0")->in_edge(), 10, 20); // time = 10 - log.RecordCommand(GetNode("a1")->in_edge(), 20, 40); // time = 20 - log.RecordCommand(GetNode("b0")->in_edge(), 10, 30); // time = 20 - log.RecordCommand(GetNode("c0")->in_edge(), 20, 70); // time = 50 - - PrepareForTarget("out", &log); - - EXPECT_EQ(GetNode("out")->in_edge()->critical_time_ms(), 100); - EXPECT_EQ(GetNode("a0")->in_edge()->critical_time_ms(), 110); - EXPECT_EQ(GetNode("b0")->in_edge()->critical_time_ms(), 120); - EXPECT_EQ(GetNode("c0")->in_edge()->critical_time_ms(), 150); - EXPECT_EQ(GetNode("a1")->in_edge()->critical_time_ms(), 130); - - const int n_edges = 5; - const char *expected_order[n_edges] = { - "c0", "a1", "b0", "a0", "out"}; - for (int i = 0; i < n_edges; ++i) { - Edge* edge = 
plan_.FindWork(); - ASSERT_NE(edge, NULL); - EXPECT_EQ(expected_order[i], edge->outputs_[0]->path()); - - std::string err; - ASSERT_TRUE(plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err)); - EXPECT_EQ(err, ""); - } - EXPECT_FALSE(plan_.FindWork()); -} - -TEST_F(PlanTest, RuntimePartialBuildLog) { - // Test the edge->run_time_ms() estimate when no build log is available - - ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, - "rule r\n" - " command = unused\n" - "build out: r a0 b0 c0 d0\n" - "build a0: r a1\n" - "build b0: r b1\n" - "build c0: r c1\n" - "build d0: r d1\n" - )); - GetNode("a0")->MarkDirty(); - GetNode("b0")->MarkDirty(); - GetNode("c0")->MarkDirty(); - GetNode("d0")->MarkDirty(); - GetNode("out")->MarkDirty(); - - BuildLog log; - log.RecordCommand(GetNode("out")->in_edge(), 0, 100); // time = 40 - log.RecordCommand(GetNode("a0")->in_edge(), 10, 20); // time = 10 - log.RecordCommand(GetNode("b0")->in_edge(), 20, 40); // time = 20 - log.RecordCommand(GetNode("c0")->in_edge(), 10, 40); // time = 30 - - PrepareForTarget("out", &log); - - // These edges times are read from the build log - EXPECT_EQ(GetNode("out")->in_edge()->run_time_ms(), 100); - EXPECT_EQ(GetNode("a0")->in_edge()->run_time_ms(), 10); - EXPECT_EQ(GetNode("b0")->in_edge()->run_time_ms(), 20); - EXPECT_EQ(GetNode("c0")->in_edge()->run_time_ms(), 30); - - // The missing data is taken from the 3rd quintile of known data - EXPECT_EQ(GetNode("d0")->in_edge()->run_time_ms(), 30); -} - /// Fake implementation of CommandRunner, useful for tests. 
struct FakeCommandRunner : public CommandRunner { explicit FakeCommandRunner(VirtualFileSystem* fs) : diff --git a/src/graph.h b/src/graph.h index db82bc5609..c7304a6d35 100644 --- a/src/graph.h +++ b/src/graph.h @@ -174,7 +174,7 @@ struct Edge { id_(0), outputs_ready_(false), deps_loaded_(false), deps_missing_(false), generated_by_dep_loader_(false), command_start_time_(0), implicit_deps_(0), order_only_deps_(0), - run_time_ms_(0), critical_time_ms_(-1), implicit_outs_(0) {} + critical_path_weight_(-1), implicit_outs_(0) {} /// Return true if all inputs' in-edges are ready. bool AllInputsReady() const; @@ -200,21 +200,13 @@ struct Edge { // Append all edge explicit inputs to |*out|. Possibly with shell escaping. void CollectInputs(bool shell_escape, std::vector* out) const; - // Critical time is the estimated execution time in ms of the edges - // forming the longest time-weighted path to the target output. - // This quantity is used as a priority during build scheduling. - // NOTE: Defaults to -1 as a marker smaller than any valid time - int64_t critical_time_ms() const { return critical_time_ms_; } - void set_critical_time_ms(int64_t critical_time_ms) { - critical_time_ms_ = critical_time_ms; - } - - // Run time in ms for this edge's command. - // Taken from the build log if present, or estimated otherwise. - // Default initialized to 0. - int64_t run_time_ms() const { return run_time_ms_; } - void set_run_time_ms(int64_t run_time_ms) { - run_time_ms_ = run_time_ms; + // critical_path_weight is the priority during build scheduling. The + // "critical path" between this edge's inputs and any target node is + // the path which maximises the sum oof weights along that path. 
+ // NOTE: Defaults to -1 as a marker smaller than any valid weight + int64_t critical_path_weight() const { return critical_path_weight_; } + void set_critical_path_weight(int64_t critical_path_weight) { + critical_path_weight_ = critical_path_weight; } const Rule* rule_; @@ -226,8 +218,7 @@ struct Edge { BindingEnv* env_; VisitMark mark_; size_t id_; - int64_t run_time_ms_; - int64_t critical_time_ms_; + int64_t critical_path_weight_; bool outputs_ready_; bool deps_loaded_; bool deps_missing_; @@ -392,15 +383,15 @@ struct DependencyScan { // priority is defined lexicographically first by largest critical // time, then lowest ID. // -// Including ID means that wherever the critical times are the same, -// the edges are executed in ascending ID order which was historically -// how all tasks were scheduled. +// Including ID means that wherever the critical path weights are the +// same, the edges are executed in ascending ID order which was +// historically how all tasks were scheduled. struct EdgePriorityLess { bool operator()(const Edge* e1, const Edge* e2) const { - const int64_t ct1 = e1->critical_time_ms(); - const int64_t ct2 = e2->critical_time_ms(); - if (ct1 != ct2) { - return ct1 < ct2; + const int64_t cw1 = e1->critical_path_weight(); + const int64_t cw2 = e2->critical_path_weight(); + if (cw1 != cw2) { + return cw1 < cw2; } return e1->id_ > e2->id_; } @@ -414,8 +405,8 @@ struct EdgePriorityGreater { }; // A priority queue holding non-owning Edge pointers. top() will -// return the edge with the largest critical time, and lowest ID if -// more than one edge has the same critical time. +// return the edge with the largest critical path weight, and lowest +// ID if more than one edge has the same critical path weight. 
class EdgePriorityQueue: public std::priority_queue, EdgePriorityLess>{ public: diff --git a/src/graph_test.cc b/src/graph_test.cc index 67dad53b5f..fb0513c052 100644 --- a/src/graph_test.cc +++ b/src/graph_test.cc @@ -998,7 +998,7 @@ TEST_F(GraphTest, EdgeQueuePriority) { // Output is largest critical time to smallest for (int i = 0; i < n_edges; ++i) { - edges[i]->set_critical_time_ms(i * 10); + edges[i]->set_critical_path_weight(i * 10); } EdgePriorityQueue queue; @@ -1015,7 +1015,7 @@ TEST_F(GraphTest, EdgeQueuePriority) { // When there is ambiguity, the lowest edge id comes first for (int i = 0; i < n_edges; ++i) { - edges[i]->set_critical_time_ms(0); + edges[i]->set_critical_path_weight(0); } queue.push(edges[1]); From 2671855b18c1545ad6da2c563bf76eb3499504bd Mon Sep 17 00:00:00 2001 From: Michael Jones Date: Thu, 24 Mar 2022 17:46:57 -0500 Subject: [PATCH 051/127] Compile getopt as a C++ file, so that CMake need not look for a C compiler at all --- CMakeLists.txt | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9c0f27a93e..76bfb6237f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,7 +5,7 @@ include(CheckIPOSupported) option(NINJA_BUILD_BINARY "Build ninja binary" ON) -project(ninja) +project(ninja CXX) # --- optional link-time optimization check_ipo_supported(RESULT lto_supported OUTPUT error) @@ -128,10 +128,18 @@ if(WIN32) src/getopt.c src/minidump-win32.cc ) + # Build getopt.c, which can be compiled as either C or C++, as C++ + # so that build environments which lack a C compiler, but have a C++ + # compiler may build ninja. 
+ set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX) else() target_sources(libninja PRIVATE src/subprocess-posix.cc) if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX") target_sources(libninja PRIVATE src/getopt.c) + # Build getopt.c, which can be compiled as either C or C++, as C++ + # so that build environments which lack a C compiler, but have a C++ + # compiler may build ninja. + set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX) endif() # Needed for perfstat_cpu_total From f6aa7537a7d003f17ebce2fdcb32bddf26fca305 Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 30 Aug 2022 14:05:10 -0400 Subject: [PATCH 052/127] Remove requirement for Google copyright from CONTRIBUTING We should keep the Google copyright on files authored by people at Google, but for new files added by people not at Google, this really isn't necessary. (Someone from open-source compliance at Google reached out to me and suggested removing this line.) --- CONTRIBUTING.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c6c190c058..37f6ebc3fd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,7 +18,6 @@ a few additions: please try to avoid relying on it and instead whenever possible use `std::`. However, please do not change existing code simply to add `std::` unless your contribution already needs to change that line of code anyway. -* All source files should have the Google Inc. license header. * Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to arguments). 
* It's not necessary to document each argument, especially when they're From ff06e229878a780192aee3c6d5d9d270e8b4c4b0 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 30 Aug 2022 21:43:53 +0200 Subject: [PATCH 053/127] mark this 1.12.0.git --- src/version.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.cc b/src/version.cc index bdcbc53e50..d3069579f7 100644 --- a/src/version.cc +++ b/src/version.cc @@ -20,7 +20,7 @@ using namespace std; -const char* kNinjaVersion = "1.11.0.git"; +const char* kNinjaVersion = "1.12.0.git"; void ParseVersion(const string& version, int* major, int* minor) { size_t end = version.find('.'); From aa25b3dc9d56a87d64c31e7b77df1d3da9438692 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 30 Aug 2022 22:35:31 +0200 Subject: [PATCH 054/127] Convert RELEASING to Markdown and add instruction for GitHub release --- RELEASING => RELEASING.md | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) rename RELEASING => RELEASING.md (54%) diff --git a/RELEASING b/RELEASING.md similarity index 54% rename from RELEASING rename to RELEASING.md index 0b033412bb..4e3a4bdcc1 100644 --- a/RELEASING +++ b/RELEASING.md @@ -1,33 +1,41 @@ Notes to myself on all the steps to make for a Ninja release. -Push new release branch: +### Push new release branch: 1. Run afl-fuzz for a day or so and run ninja_test 2. Consider sending a heads-up to the ninja-build mailing list first 3. Make sure branches 'master' and 'release' are synced up locally 4. Update src/version.cc with new version (with ".git"), then - git commit -am 'mark this 1.5.0.git' + ``` + git commit -am 'mark this 1.5.0.git' + ``` 5. git checkout release; git merge master 6. Fix version number in src/version.cc (it will likely conflict in the above) 7. Fix version in doc/manual.asciidoc (exists only on release branch) 8. 
commit, tag, push (don't forget to push --tags) - git commit -am v1.5.0; git push origin release - git tag v1.5.0; git push --tags - # Push the 1.5.0.git change on master too: - git checkout master; git push origin master + ``` + git commit -am v1.5.0; git push origin release + git tag v1.5.0; git push --tags + # Push the 1.5.0.git change on master too: + git checkout master; git push origin master + ``` 9. Construct release notes from prior notes - credits: git shortlog -s --no-merges REV.. -Release on github: -1. https://github.com/blog/1547-release-your-software - Add binaries to https://github.com/ninja-build/ninja/releases + credits: `git shortlog -s --no-merges REV..` -Make announcement on mailing list: + +### Release on GitHub: +1. Go to [Tags](https://github.com/ninja-build/ninja/tags) +2. Open the newly created tag and select "Create release from tag" +3. Create the release which will trigger a build which automatically attaches + the binaries + +### Make announcement on mailing list: 1. copy old mail -Update website: +### Update website: 1. Make sure your ninja checkout is on the v1.5.0 tag 2. Clone https://github.com/ninja-build/ninja-build.github.io 3. In that repo, `./update-docs.sh` 4. Update index.html with newest version and link to release notes -5. git commit -m 'run update-docs.sh, 1.5.0 release' -6. git push origin master +5. `git commit -m 'run update-docs.sh, 1.5.0 release'` +6. 
`git push origin master` From 184bfeb5cc802539e51bd344bedb4749b1dfae58 Mon Sep 17 00:00:00 2001 From: Paul Seyfert Date: Sun, 4 Sep 2022 11:35:49 +0200 Subject: [PATCH 055/127] Add --quiet flag to zsh completion --- misc/zsh-completion | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/misc/zsh-completion b/misc/zsh-completion index d42dac220c..d439df3994 100644 --- a/misc/zsh-completion +++ b/misc/zsh-completion @@ -68,7 +68,8 @@ _arguments \ '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \ '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \ '-n[Dry run (do not run commands but act like they succeeded)]' \ - '(-v --verbose)'{-v,--verbose}'[Show all command lines while building]' \ + '(-v --verbose --quiet)'{-v,--verbose}'[Show all command lines while building]' \ + "(-v --verbose --quiet)--quiet[Don't show progress status, just command output]" \ '-d+[Enable debugging (use -d list to list modes)]:modes:_ninja-modes' \ '-t+[Run a subtool (use -t list to list subtools)]:tools:_ninja-tools' \ '*::targets:_ninja-targets' From b39e5d7e5d782e1d1c00f24d2318b5aed1521d4a Mon Sep 17 00:00:00 2001 From: Martin Tzvetanov Grigorov Date: Wed, 5 Oct 2022 11:41:35 +0300 Subject: [PATCH 056/127] Add build job for Linux ARM64 Signed-off-by: Martin Tzvetanov Grigorov --- .github/workflows/linux.yml | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 3c93e00745..57a569e352 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -147,3 +147,63 @@ jobs: ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots python3 misc/ninja_syntax_test.py ./misc/output_test.py + + build-aarch64: + name: Build Linux ARM64 + runs-on: [ubuntu-latest] + steps: + - uses: actions/checkout@v3 + + - name: Build + uses: uraimo/run-on-arch-action@v2 + with: + arch: aarch64 + distro: ubuntu18.04 + githubToken: ${{ 
github.token }} + dockerRunArgs: | + --volume "${PWD}:/ninja" + install: | + apt-get update -q -y + apt-get install -q -y make gcc g++ libasan5 clang-tools curl p7zip-full file + run: | + set -x + cd /ninja + + # INSTALL CMAKE + CMAKE_VERSION=3.23.4 + curl -L -O https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-aarch64.sh + chmod +x cmake-${CMAKE_VERSION}-Linux-aarch64.sh + ./cmake-${CMAKE_VERSION}-Linux-aarch64.sh --skip-license --prefix=/usr/local + + # BUILD + cmake -DCMAKE_BUILD_TYPE=Release -B release-build + cmake --build release-build --parallel --config Release + strip release-build/ninja + file release-build/ninja + + # TEST + pushd release-build + ./ninja_test + popd + + # CREATE ARCHIVE + mkdir artifact + 7z a artifact/ninja-linux-aarch64.zip ./release-build/ninja + + # Upload ninja binary archive as an artifact + - name: Upload artifact + uses: actions/upload-artifact@v1 + with: + name: ninja-binary-archives + path: artifact + + - name: Upload release asset + if: github.event.action == 'published' + uses: actions/upload-release-asset@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: ./artifact/ninja-linux-aarch64.zip + asset_name: ninja-linux-aarch64.zip + asset_content_type: application/zip From 7bba11ae704efc84cac5fde5e9be53f653f237d1 Mon Sep 17 00:00:00 2001 From: Ma Aiguo Date: Sun, 9 Oct 2022 17:50:29 +0800 Subject: [PATCH 057/127] support 32bit system like i386 (#829) --- src/build_log_test.cc | 6 +++++- src/deps_log_test.cc | 43 ++++++++++++++++++++++++++++++++++++++++--- src/disk_interface.cc | 5 +++++ src/util.cc | 5 +++++ 4 files changed, 55 insertions(+), 4 deletions(-) diff --git a/src/build_log_test.cc b/src/build_log_test.cc index 37182994d2..f03100d809 100644 --- a/src/build_log_test.cc +++ b/src/build_log_test.cc @@ -133,9 +133,13 @@ TEST_F(BuildLogTest, Truncate) { log1.RecordCommand(state_.edges_[1], 20, 
25); log1.Close(); } - +#ifdef __USE_LARGEFILE64 + struct stat64 statbuf; + ASSERT_EQ(0, stat64(kTestFilename, &statbuf)); +#else struct stat statbuf; ASSERT_EQ(0, stat(kTestFilename, &statbuf)); +#endif ASSERT_GT(statbuf.st_size, 0); // For all possible truncations of the input file, assert that we don't diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc index 13fcc788b6..cb1c925532 100644 --- a/src/deps_log_test.cc +++ b/src/deps_log_test.cc @@ -138,9 +138,13 @@ TEST_F(DepsLogTest, DoubleEntry) { deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); - +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif file_size = (int)st.st_size; ASSERT_GT(file_size, 0); } @@ -160,9 +164,13 @@ TEST_F(DepsLogTest, DoubleEntry) { deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); - +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif int file_size_2 = (int)st.st_size; ASSERT_EQ(file_size, file_size_2); } @@ -198,9 +206,13 @@ TEST_F(DepsLogTest, Recompact) { log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps); log.Close(); - +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif file_size = (int)st.st_size; ASSERT_GT(file_size, 0); } @@ -222,8 +234,13 @@ TEST_F(DepsLogTest, Recompact) { log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif file_size_2 = (int)st.st_size; // The file should grow to record the new deps. 
ASSERT_GT(file_size_2, file_size); @@ -273,8 +290,13 @@ TEST_F(DepsLogTest, Recompact) { ASSERT_EQ(other_out, log.nodes()[other_out->id()]); // The file should have shrunk a bit for the smaller deps. +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif file_size_3 = (int)st.st_size; ASSERT_LT(file_size_3, file_size_2); } @@ -317,8 +339,13 @@ TEST_F(DepsLogTest, Recompact) { ASSERT_EQ(-1, state.LookupNode("baz.h")->id()); // The file should have shrunk more. +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif int file_size_4 = (int)st.st_size; ASSERT_LT(file_size_4, file_size_3); } @@ -374,8 +401,13 @@ TEST_F(DepsLogTest, Truncated) { } // Get the file size. +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif // Try reloading at truncated sizes. // Track how many nodes/deps were found; they should decrease with @@ -434,8 +466,13 @@ TEST_F(DepsLogTest, TruncatedRecovery) { // Shorten the file, corrupting the last record. { +#ifdef __USE_LARGEFILE64 + struct stat64 st; + ASSERT_EQ(0, stat64(kTestFilename, &st)); +#else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); +#endif string err; ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err)); } diff --git a/src/disk_interface.cc b/src/disk_interface.cc index e64bb43f32..7277c3e912 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -194,9 +194,14 @@ TimeStamp RealDiskInterface::Stat(const string& path, string* err) const { } DirCache::iterator di = ci->second.find(base); return di != ci->second.end() ? 
di->second : 0; +#else +#ifdef __USE_LARGEFILE64 + struct stat64 st; + if (stat64(path.c_str(), &st) < 0) { #else struct stat st; if (stat(path.c_str(), &st) < 0) { +#endif if (errno == ENOENT || errno == ENOTDIR) return 0; *err = "stat(" + path + "): " + strerror(errno); diff --git a/src/util.cc b/src/util.cc index ef5f103305..eefa3f50cd 100644 --- a/src/util.cc +++ b/src/util.cc @@ -369,8 +369,13 @@ int ReadFile(const string& path, string* contents, string* err) { return -errno; } +#ifdef __USE_LARGEFILE64 + struct stat64 st; + if (fstat64(fileno(f), &st) < 0) { +#else struct stat st; if (fstat(fileno(f), &st) < 0) { +#endif err->assign(strerror(errno)); fclose(f); return -errno; From 6201d8d3738948785452d8f242336e7d06f71f2e Mon Sep 17 00:00:00 2001 From: Sebastian Grabowski Date: Tue, 11 Oct 2022 09:43:14 +0200 Subject: [PATCH 058/127] Increase required version of re2c to 0.15.3 The `--no-version` command line argument was introduced with re2c v0.15.3. See https://re2c.org/releases/changelog/changelog.html --- configure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.py b/configure.py index 28ff855156..7a5e2d33b8 100755 --- a/configure.py +++ b/configure.py @@ -478,7 +478,7 @@ def shell_escape(str): def has_re2c(): try: proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE) - return int(proc.communicate()[0], 10) >= 1103 + return int(proc.communicate()[0], 10) >= 1503 except OSError: return False if has_re2c(): @@ -489,7 +489,7 @@ def has_re2c(): n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc')) n.build(src('lexer.cc'), 're2c', src('lexer.in.cc')) else: - print("warning: A compatible version of re2c (>= 0.11.3) was not found; " + print("warning: A compatible version of re2c (>= 0.15.3) was not found; " "changes to src/*.in.cc will not affect your build.") n.newline() From 721f09bda72144ed1ab7257b6aa50eecc8abac8a Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Mon, 24 Oct 2022 16:36:07 -0700 
Subject: [PATCH 059/127] Remove bad merge text from .md file --- CONTRIBUTING.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f003750e2a..37f6ebc3fd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,6 @@ Generally it's the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with a few additions: -* Any code merged into the Ninja codebase which will be part of the main - executable must compile as C++11. You may use C++14 features in a test or an - unimportant tool if you guard your code with `#if __cplusplus >= 201402L`. * We have used `using namespace std;` a lot in the past. For new contributions, please try to avoid relying on it and instead whenever possible use `std::`. However, please do not change existing code simply to add `std::` unless your From f5f7feaf21983f233ccc380ae95c79360cdb0bbc Mon Sep 17 00:00:00 2001 From: Bruce Dawson Date: Mon, 24 Oct 2022 19:36:44 -0700 Subject: [PATCH 060/127] Revert configure.py change --- configure.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 configure.py diff --git a/configure.py b/configure.py old mode 100644 new mode 100755 From 9e9f64ffc3f5474976dbfd7cc82635a7e3204e3f Mon Sep 17 00:00:00 2001 From: Thad House Date: Mon, 31 Oct 2022 16:50:01 -0700 Subject: [PATCH 061/127] Add build job for Windows Arm64 --- .github/workflows/windows.yml | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e4fe7bdb7e..5ef1494764 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -54,3 +54,43 @@ jobs: asset_path: ./artifact/ninja-win.zip asset_name: ninja-win.zip asset_content_type: application/zip + + build-arm64: + runs-on: windows-latest + + steps: + - uses: actions/checkout@v2 + + - name: Install dependencies + run: choco install re2c + + - name: Build ninja + shell: bash + run: | 
+ cmake -Bbuild -A arm64 + cmake --build build --parallel --config Debug + cmake --build build --parallel --config Release + + - name: Create ninja archive + shell: bash + run: | + mkdir artifact + 7z a artifact/ninja-winarm64.zip ./build/Release/ninja.exe + + # Upload ninja binary archive as an artifact + - name: Upload artifact + uses: actions/upload-artifact@v1 + with: + name: ninja-binary-archives + path: artifact + + - name: Upload release asset + if: github.event.action == 'published' + uses: actions/upload-release-asset@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: ./artifact/ninja-winarm64.zip + asset_name: ninja-winarm64.zip + asset_content_type: application/zip From bfa999c8c6746a6b88750693b3b97f56aae59e8e Mon Sep 17 00:00:00 2001 From: "xiaojian.liang" Date: Sun, 4 Dec 2022 14:28:08 +0800 Subject: [PATCH 062/127] fix garbled error message, like Chinese in Windows --- src/util.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util.cc b/src/util.cc index eefa3f50cd..0553de346c 100644 --- a/src/util.cc +++ b/src/util.cc @@ -461,7 +461,7 @@ string GetLastErrorString() { FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), (char*)&msg_buf, 0, NULL); From 870b59b71f826c962624fb9a9d2f24818d715bd6 Mon Sep 17 00:00:00 2001 From: Cristian Adam Date: Mon, 12 Dec 2022 23:04:02 +0100 Subject: [PATCH 063/127] Add longPathAware manifest to enable long paths on Windows Fixes: #1900 --- windows/ninja.manifest | 1 + 1 file changed, 1 insertion(+) diff --git a/windows/ninja.manifest b/windows/ninja.manifest index dab929e151..47949dd7ce 100644 --- a/windows/ninja.manifest +++ b/windows/ninja.manifest @@ -3,6 +3,7 @@ UTF-8 + true From 089927e0a7082887304a8d8ed8c122e8615d9cf1 Mon Sep 17 00:00:00 2001 From: William Kent Date: Tue, 13 Dec 2022 13:27:24 -0500 Subject: [PATCH 064/127] Add 
overridable parameter for name of Python binary (#2224) * Add overridable parameter for name of Python binary On macOS, python is not an available command any more. python3 is. * Use option() as requested * Store variable in different way --- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 76bfb6237f..98f7948b77 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -90,6 +90,8 @@ function(check_platform_supports_browse_mode RESULT) endfunction() +set(NINJA_PYTHON "python" CACHE STRING "Python interpreter to use for the browse tool") + check_platform_supports_browse_mode(platform_supports_ninja_browse) # Core source files all build into ninja library. @@ -195,7 +197,7 @@ if(platform_supports_ninja_browse) PROPERTIES OBJECT_DEPENDS "${PROJECT_BINARY_DIR}/build/browse_py.h" INCLUDE_DIRECTORIES "${PROJECT_BINARY_DIR}" - COMPILE_DEFINITIONS NINJA_PYTHON="python" + COMPILE_DEFINITIONS NINJA_PYTHON="${NINJA_PYTHON}" ) endif() From 6a17e84370064eec6f22cfb1717ab80cf898d82b Mon Sep 17 00:00:00 2001 From: Nico Weber Date: Tue, 10 Jan 2023 10:46:45 -0500 Subject: [PATCH 065/127] Use python3 in all run lines We already did this in some, this converts the rest. Also chmod +x on write_fake_manifests.py while here. --- configure.py | 4 +--- misc/measure.py | 4 +--- misc/ninja_syntax_test.py | 2 +- misc/write_fake_manifests.py | 2 +- src/browse.py | 4 +--- 5 files changed, 5 insertions(+), 11 deletions(-) mode change 100644 => 100755 misc/write_fake_manifests.py diff --git a/configure.py b/configure.py index 09c5b283e0..588250aa8a 100755 --- a/configure.py +++ b/configure.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright 2001 Google Inc. All Rights Reserved. 
# @@ -19,8 +19,6 @@ Projects that use ninja themselves should either write a similar script or use a meta-build system that supports Ninja output.""" -from __future__ import print_function - from optparse import OptionParser import os import pipes diff --git a/misc/measure.py b/misc/measure.py index 8ce95e696b..f3825efbb0 100755 --- a/misc/measure.py +++ b/misc/measure.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2011 Google Inc. All Rights Reserved. # @@ -17,8 +17,6 @@ """measure the runtime of a command by repeatedly running it. """ -from __future__ import print_function - import time import subprocess import sys diff --git a/misc/ninja_syntax_test.py b/misc/ninja_syntax_test.py index 90ff9c6bdb..61fb177d43 100755 --- a/misc/ninja_syntax_test.py +++ b/misc/ninja_syntax_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2011 Google Inc. All Rights Reserved. # diff --git a/misc/write_fake_manifests.py b/misc/write_fake_manifests.py old mode 100644 new mode 100755 index abcb677e18..bf9cf7de92 --- a/misc/write_fake_manifests.py +++ b/misc/write_fake_manifests.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Writes large manifest files, for manifest parser performance testing. diff --git a/src/browse.py b/src/browse.py index 653cbe91f6..b125e805a9 100755 --- a/src/browse.py +++ b/src/browse.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright 2001 Google Inc. All Rights Reserved. # @@ -20,8 +20,6 @@ it when needed. """ -from __future__ import print_function - try: import http.server as httpserver import socketserver From accd931de51d7cd9e2ce7420d1b0966f07d3ac41 Mon Sep 17 00:00:00 2001 From: Orgad Shaneh Date: Sat, 14 Jan 2023 22:16:04 +0200 Subject: [PATCH 066/127] Remove auto_ptr C++11 is now a hard requirement. 
--- src/build.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/build.h b/src/build.h index d727a8a480..8ec2355f7e 100644 --- a/src/build.h +++ b/src/build.h @@ -215,11 +215,7 @@ struct Builder { State* state_; const BuildConfig& config_; Plan plan_; -#if __cplusplus < 201703L - std::auto_ptr command_runner_; -#else - std::unique_ptr command_runner_; // auto_ptr was removed in C++17. -#endif + std::unique_ptr command_runner_; Status* status_; private: From 6fd567bbb1274742bf59bcc3dd9c3e917e7ce599 Mon Sep 17 00:00:00 2001 From: scivision Date: Tue, 24 Jan 2023 22:51:15 -0500 Subject: [PATCH 067/127] ci:macos package with macos 12 for macos >= 10.15 when this parameter was added, it was also for the last version that recently went end of life --- .github/workflows/macos.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 0797433267..96b7cf17ff 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -8,7 +8,7 @@ on: jobs: build: - runs-on: macos-11.0 + runs-on: macos-12 steps: - uses: actions/checkout@v2 @@ -19,7 +19,7 @@ jobs: - name: Build ninja shell: bash env: - MACOSX_DEPLOYMENT_TARGET: 10.12 + MACOSX_DEPLOYMENT_TARGET: 10.15 run: | cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64' cmake --build build --config Release From 18d692c9636a9dd744a3f8540d6ac35a06481640 Mon Sep 17 00:00:00 2001 From: tocic Date: Fri, 27 Jan 2023 09:29:31 +0300 Subject: [PATCH 068/127] Fix typos in docs --- README.md | 2 +- doc/manual.asciidoc | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index d7637663e7..1ca56c5183 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ See [the manual](https://ninja-build.org/manual.html) or `doc/manual.asciidoc` included in the distribution for background and more details. 
-Binaries for Linux, Mac, and Windows are available at +Binaries for Linux, Mac and Windows are available on [GitHub](https://github.com/ninja-build/ninja/releases). Run `./ninja -h` for Ninja help. diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index 214dca4a57..2479aac225 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -24,7 +24,7 @@ Where other build systems are high-level languages, Ninja aims to be an assembler. Build systems get slow when they need to make decisions. When you are -in a edit-compile cycle you want it to be as fast as possible -- you +in an edit-compile cycle you want it to be as fast as possible -- you want the build system to do the minimum work necessary to figure out what needs to be built immediately. @@ -222,14 +222,14 @@ found useful during Ninja's development. The current tools are: `browse`:: browse the dependency graph in a web browser. Clicking a file focuses the view on that file, showing inputs and outputs. This -feature requires a Python installation. By default port 8000 is used +feature requires a Python installation. By default, port 8000 is used and a web browser will be opened. This can be changed as follows: + ---- ninja -t browse --port=8000 --no-browser mytarget ---- + -`graph`:: output a file in the syntax used by `graphviz`, a automatic +`graph`:: output a file in the syntax used by `graphviz`, an automatic graph layout tool. Use it like: + ---- @@ -261,7 +261,7 @@ output files are out of date. rebuild those targets. _Available since Ninja 1.11._ -`clean`:: remove built files. By default it removes all built files +`clean`:: remove built files. By default, it removes all built files except for those created by the generator. Adding the `-g` flag also removes built files created by the generator (see <>). Additional arguments are @@ -674,14 +674,14 @@ Ninja supports this processing in two forms. as a temporary). 2. 
`deps = msvc` specifies that the tool outputs header dependencies - in the form produced by Visual Studio's compiler's + in the form produced by the Visual Studio compiler's http://msdn.microsoft.com/en-us/library/hdkef6tk(v=vs.90).aspx[`/showIncludes` flag]. Briefly, this means the tool outputs specially-formatted lines to its stdout. Ninja then filters these lines from the displayed output. No `depfile` attribute is necessary, but the localized string - in front of the the header file path. For instance + in front of the header file path should be globally defined. For instance, `msvc_deps_prefix = Note: including file:` - for a English Visual Studio (the default). Should be globally defined. + for an English Visual Studio (the default). + ---- msvc_deps_prefix = Note: including file: @@ -964,14 +964,14 @@ Fundamentally, command lines behave differently on Unixes and Windows. On Unixes, commands are arrays of arguments. The Ninja `command` variable is passed directly to `sh -c`, which is then responsible for -interpreting that string into an argv array. Therefore the quoting +interpreting that string into an argv array. Therefore, the quoting rules are those of the shell, and you can use all the normal shell operators, like `&&` to chain multiple commands, or `VAR=value cmd` to set environment variables. On Windows, commands are strings, so Ninja passes the `command` string directly to `CreateProcess`. (In the common case of simply executing -a compiler this means there is less overhead.) Consequently the +a compiler this means there is less overhead.) Consequently, the quoting rules are determined by the called program, which on Windows are usually provided by the C library. If you need shell interpretation of the command (such as the use of `&&` to chain @@ -1064,7 +1064,7 @@ A build edge can list another build edge as a validation even if the second edge depends on the first. 
Validations are designed to handle rules that perform error checking but -don't produce any artifacts needed by the build, for example static +don't produce any artifacts needed by the build, for example, static analysis tools. Marking the static analysis rule as an implicit input of the main build rule of the source files or of the rules that depend on the main build rule would slow down the critical path of the build, From 2d9083b2608bd60c31583193d321d13a81a75beb Mon Sep 17 00:00:00 2001 From: Nikhil Dabas Date: Tue, 7 Feb 2023 21:28:29 +0530 Subject: [PATCH 069/127] Clean up Windows workflow and add an x86 build (#2251) * Clean up Windows workflow, add support for x86 * Sensible naming for artifacts No suffix for the x64 version, as that was the default already. * Remove x86 build from Windows workflow --- .github/workflows/windows.yml | 59 +++++++++-------------------------- 1 file changed, 15 insertions(+), 44 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 5ef1494764..b6ec2ac32f 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,6 +10,15 @@ jobs: build: runs-on: windows-latest + strategy: + fail-fast: false + matrix: + include: + - arch: 'x64' + suffix: '' + - arch: 'arm64' + suffix: 'arm64' + steps: - uses: actions/checkout@v2 @@ -19,15 +28,17 @@ jobs: - name: Build ninja shell: bash run: | - cmake -Bbuild + cmake -Bbuild -A ${{ matrix.arch }} cmake --build build --parallel --config Debug cmake --build build --parallel --config Release - name: Test ninja (Debug) + if: matrix.arch != 'arm64' run: .\ninja_test.exe working-directory: build/Debug - name: Test ninja (Release) + if: matrix.arch != 'arm64' run: .\ninja_test.exe working-directory: build/Release @@ -35,47 +46,7 @@ jobs: shell: bash run: | mkdir artifact - 7z a artifact/ninja-win.zip ./build/Release/ninja.exe - - # Upload ninja binary archive as an artifact - - name: Upload artifact - uses: actions/upload-artifact@v1 - 
with: - name: ninja-binary-archives - path: artifact - - - name: Upload release asset - if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0.1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: ./artifact/ninja-win.zip - asset_name: ninja-win.zip - asset_content_type: application/zip - - build-arm64: - runs-on: windows-latest - - steps: - - uses: actions/checkout@v2 - - - name: Install dependencies - run: choco install re2c - - - name: Build ninja - shell: bash - run: | - cmake -Bbuild -A arm64 - cmake --build build --parallel --config Debug - cmake --build build --parallel --config Release - - - name: Create ninja archive - shell: bash - run: | - mkdir artifact - 7z a artifact/ninja-winarm64.zip ./build/Release/ninja.exe + 7z a artifact/ninja-win${{ matrix.suffix }}.zip ./build/Release/ninja.exe # Upload ninja binary archive as an artifact - name: Upload artifact @@ -91,6 +62,6 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ github.event.release.upload_url }} - asset_path: ./artifact/ninja-winarm64.zip - asset_name: ninja-winarm64.zip + asset_path: ./artifact/ninja-win${{ matrix.suffix }}.zip + asset_name: ninja-win${{ matrix.suffix }}.zip asset_content_type: application/zip From da6645e7d75065e170126785335a1de03afd7dff Mon Sep 17 00:00:00 2001 From: Konstantin Kharlamov Date: Tue, 28 Mar 2023 18:46:05 +0300 Subject: [PATCH 070/127] ninja-mode.el: fix variables highlighting The code previously did not take into account that between the name and assignment may be any number of space including no space at all. It was also incorrectly highlighting in a code like foo = bar = buzz the `bar` as a variable, even though the `bar = buzz` is just a text that gets assigned to `foo`, i.e. `bar` is not a variable. Fix that. 
--- misc/ninja-mode.el | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el index 8b975d5156..76abf811f5 100644 --- a/misc/ninja-mode.el +++ b/misc/ninja-mode.el @@ -28,7 +28,7 @@ "pool" "default") 'words)) . font-lock-keyword-face) - ("\\([[:alnum:]_]+\\) =" 1 font-lock-variable-name-face) + ("^[[:space:]]*\\([[:alnum:]_]+\\)[[:space:]]*=" 1 font-lock-variable-name-face) ;; Variable expansion. ("$[[:alnum:]_]+" . font-lock-variable-name-face) ("${[[:alnum:]._]+}" . font-lock-variable-name-face) From 74642c5a6fd68ae71545796f3f7a7ee3e641a2f7 Mon Sep 17 00:00:00 2001 From: Konstantin Kharlamov Date: Tue, 28 Mar 2023 18:50:18 +0300 Subject: [PATCH 071/127] ninja-mode.el: add support for indentation We default `ninja-indent-offset` to `2` because Meson and CMake use `1` and `2` values accordingly, but `1` seems like too little, so use `2`. The correctness was tested in particular on two `build.ninja` files, one generated by Meson (1790 lines), and another by CMake (7777 lines). After setting `ninja-indent-offset` to the expected by the file value and re-indenting whole file the buffer was left unchanged, i.e. the calculated offset matched the ones already used. --- misc/ninja-mode.el | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el index 76abf811f5..d4f06e693f 100644 --- a/misc/ninja-mode.el +++ b/misc/ninja-mode.el @@ -19,15 +19,21 @@ ;;; Commentary: ;; Simple emacs mode for editing .ninja files. -;; Just some syntax highlighting for now. ;;; Code: +(defcustom ninja-indent-offset 2 + "*Amount of offset per level of indentation." 
+ :type 'integer + :safe 'natnump + :group 'ninja) + +(defconst ninja-keywords-re + (concat "^" (regexp-opt '("rule" "build" "subninja" "include" "pool" "default") + 'words))) + (defvar ninja-keywords - `((,(concat "^" (regexp-opt '("rule" "build" "subninja" "include" - "pool" "default") - 'words)) - . font-lock-keyword-face) + `((,ninja-keywords-re . font-lock-keyword-face) ("^[[:space:]]*\\([[:alnum:]_]+\\)[[:space:]]*=" 1 font-lock-variable-name-face) ;; Variable expansion. ("$[[:alnum:]_]+" . font-lock-variable-name-face) @@ -69,11 +75,30 @@ (unless (= line-end (1+ (buffer-size))) (put-text-property line-end (1+ line-end) 'syntax-table '(12))))))))) +(defun ninja-compute-indentation () + "Calculate indentation for the current line." + (save-excursion + (beginning-of-line) + (if (or (looking-at ninja-keywords-re) + (= (line-number-at-pos) 1)) + 0 + (forward-line -1) + (if (looking-at ninja-keywords-re) + ninja-indent-offset + (current-indentation))))) + +(defun ninja-indent-line () + "Indent the current line. Uses previous indentation level if + available or `ninja-indent-offset'" + (interactive "*") + (indent-line-to (ninja-compute-indentation))) + ;;;###autoload (define-derived-mode ninja-mode prog-mode "ninja" (set (make-local-variable 'comment-start) "#") (set (make-local-variable 'parse-sexp-lookup-properties) t) (set (make-local-variable 'syntax-propertize-function) #'ninja-syntax-propertize) + (set (make-local-variable 'indent-line-function) 'ninja-indent-line) (setq font-lock-defaults '(ninja-keywords))) ;; Run ninja-mode for files ending in .ninja. 
From 96d408186bed962f6636eac216510ec2bd706eff Mon Sep 17 00:00:00 2001 From: Waleed Khan Date: Fri, 31 Mar 2023 14:02:41 -0700 Subject: [PATCH 072/127] Fix formatting for `msvc` tool documentation --- doc/manual.asciidoc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index 2479aac225..22601e11ce 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -324,20 +324,19 @@ Where `ENVFILE` is a binary file that contains an environment block suitable for CreateProcessA() on Windows (i.e. a series of zero-terminated strings that look like NAME=VALUE, followed by an extra zero terminator). Note that this uses the local codepage encoding. - ++ This tool also supports a deprecated way of parsing the compiler's output when -the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it. +the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it: + ---- +---- ninja -t msvc -o DEPFILE [-p STRING] -- cl.exe /showIncludes ---- +---- + - When using this option, `-p STRING` can be used to pass the localized line prefix that `cl.exe` uses to output dependency information. For English-speaking regions this is `"Note: including file: "` without the double quotes, but will be different for other regions. - ++ Note that Ninja supports this natively now, with the use of `deps = msvc` and `msvc_deps_prefix` in Ninja files. 
Native support also avoids launching an extra tool process each time the compiler must be called, which can speed up builds From c266b21674171dfb39d10872bec6ac493665cc5d Mon Sep 17 00:00:00 2001 From: Junji Watanabe Date: Wed, 19 Apr 2023 02:59:43 +0900 Subject: [PATCH 073/127] Increment ninja_log version (#2240) * Increment ninjg_log version * fix test * remove code for ninjg_log version 4 * line length * split error messages --- src/build_log.cc | 26 ++++++++++++++------------ src/build_log_test.cc | 32 +++++++++++++++++++------------- 2 files changed, 33 insertions(+), 25 deletions(-) diff --git a/src/build_log.cc b/src/build_log.cc index b35279d410..cf2118251c 100644 --- a/src/build_log.cc +++ b/src/build_log.cc @@ -53,8 +53,8 @@ using namespace std; namespace { const char kFileSignature[] = "# ninja log v%d\n"; -const int kOldestSupportedVersion = 4; -const int kCurrentVersion = 5; +const int kOldestSupportedVersion = 6; +const int kCurrentVersion = 6; // 64bit MurmurHash2, by Austin Appleby #if defined(_MSC_VER) @@ -279,9 +279,16 @@ LoadStatus BuildLog::Load(const string& path, string* err) { if (!log_version) { sscanf(line_start, kFileSignature, &log_version); + bool invalid_log_version = false; if (log_version < kOldestSupportedVersion) { - *err = ("build log version invalid, perhaps due to being too old; " - "starting over"); + invalid_log_version = true; + *err = "build log version is too old; starting over"; + + } else if (log_version > kCurrentVersion) { + invalid_log_version = true; + *err = "build log version is too new; starting over"; + } + if (invalid_log_version) { fclose(file); unlink(path.c_str()); // Don't report this as a failure. 
An empty build log will cause @@ -344,14 +351,9 @@ LoadStatus BuildLog::Load(const string& path, string* err) { entry->start_time = start_time; entry->end_time = end_time; entry->mtime = mtime; - if (log_version >= 5) { - char c = *end; *end = '\0'; - entry->command_hash = (uint64_t)strtoull(start, NULL, 16); - *end = c; - } else { - entry->command_hash = LogEntry::HashCommand(StringPiece(start, - end - start)); - } + char c = *end; *end = '\0'; + entry->command_hash = (uint64_t)strtoull(start, NULL, 16); + *end = c; } fclose(file); diff --git a/src/build_log_test.cc b/src/build_log_test.cc index f03100d809..12c2dc742c 100644 --- a/src/build_log_test.cc +++ b/src/build_log_test.cc @@ -104,9 +104,11 @@ TEST_F(BuildLogTest, FirstWriteAddsSignature) { TEST_F(BuildLogTest, DoubleEntry) { FILE* f = fopen(kTestFilename, "wb"); - fprintf(f, "# ninja log v4\n"); - fprintf(f, "0\t1\t2\tout\tcommand abc\n"); - fprintf(f, "3\t4\t5\tout\tcommand def\n"); + fprintf(f, "# ninja log v6\n"); + fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command abc")); + fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command def")); fclose(f); string err; @@ -173,10 +175,11 @@ TEST_F(BuildLogTest, ObsoleteOldVersion) { ASSERT_NE(err.find("version"), string::npos); } -TEST_F(BuildLogTest, SpacesInOutputV4) { +TEST_F(BuildLogTest, SpacesInOutput) { FILE* f = fopen(kTestFilename, "wb"); - fprintf(f, "# ninja log v4\n"); - fprintf(f, "123\t456\t456\tout with space\tcommand\n"); + fprintf(f, "# ninja log v6\n"); + fprintf(f, "123\t456\t456\tout with space\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command")); fclose(f); string err; @@ -197,10 +200,12 @@ TEST_F(BuildLogTest, DuplicateVersionHeader) { // build log on Windows. This shouldn't crash, and the second version header // should be ignored. 
FILE* f = fopen(kTestFilename, "wb"); - fprintf(f, "# ninja log v4\n"); - fprintf(f, "123\t456\t456\tout\tcommand\n"); - fprintf(f, "# ninja log v4\n"); - fprintf(f, "456\t789\t789\tout2\tcommand2\n"); + fprintf(f, "# ninja log v6\n"); + fprintf(f, "123\t456\t456\tout\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command")); + fprintf(f, "# ninja log v6\n"); + fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command2")); fclose(f); string err; @@ -247,7 +252,7 @@ struct TestDiskInterface : public DiskInterface { TEST_F(BuildLogTest, Restat) { FILE* f = fopen(kTestFilename, "wb"); - fprintf(f, "# ninja log v4\n" + fprintf(f, "# ninja log v6\n" "1\t2\t3\tout\tcommand\n"); fclose(f); std::string err; @@ -275,12 +280,13 @@ TEST_F(BuildLogTest, VeryLongInputLine) { // Ninja's build log buffer is currently 256kB. Lines longer than that are // silently ignored, but don't affect parsing of other lines. FILE* f = fopen(kTestFilename, "wb"); - fprintf(f, "# ninja log v4\n"); + fprintf(f, "# ninja log v6\n"); fprintf(f, "123\t456\t456\tout\tcommand start"); for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i) fputs(" more_command", f); fprintf(f, "\n"); - fprintf(f, "456\t789\t789\tout2\tcommand2\n"); + fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n", + BuildLog::LogEntry::HashCommand("command2")); fclose(f); string err; From 63415a419815d844149557aa16fa8a0f72b0c7e6 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Wed, 19 Apr 2023 18:57:11 +0200 Subject: [PATCH 074/127] Enable ppoll() usage when available. This patch modifies the CMakeLists.txt file to probe for ppoll() on the target system, and define -DUSE_PPOLL=1 if it is available. This can be disabled by setting -DNINJA_FORCE_PSELECT=ON when invoking CMake. This matches the default behavior of the configure.py script (and its `--force-pselect` option). 
Note that there is no noticeable performance difference before build commands are launched, so this change is very hard to benchmark properly. Fix for https://github.com/ninja-build/ninja/issues/1821 --- CMakeLists.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 98f7948b77..fd32ab10bf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,6 +4,7 @@ include(CheckSymbolExists) include(CheckIPOSupported) option(NINJA_BUILD_BINARY "Build ninja binary" ON) +option(NINJA_FORCE_PSELECT "Use pselect() even on platforms that provide ppoll()" OFF) project(ninja CXX) @@ -35,6 +36,16 @@ else() if(flag_color_diag) add_compile_options(-fdiagnostics-color) endif() + + if(NOT NINJA_FORCE_PSELECT) + # Check whether ppoll() is usable on the target platform. + # Set -DUSE_PPOLL=1 if this is the case. + include(CheckSymbolExists) + check_symbol_exists(ppoll poll.h HAVE_PPOLL) + if(HAVE_PPOLL) + add_compile_definitions(USE_PPOLL=1) + endif() + endif() endif() # --- optional re2c From 5f8a1fcd3335b45c1452e88b8ab87458757d212c Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Mon, 17 Apr 2023 11:44:44 +0200 Subject: [PATCH 075/127] Allow duplicate rule variable usage. This fixes #1966 by removing the variable name from the lookups stack once the recursive lookup call has been performed. Without this, any previously expanded variable could no longer be referenced in the command, as Ninja would (incorrectly) complain about a cyclical dependency. 
--- misc/output_test.py | 13 ++++++++++++ src/graph.cc | 49 +++++++++++++++++++++++++++++++++++++++------ 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/misc/output_test.py b/misc/output_test.py index 141716c136..94d1fda9fe 100755 --- a/misc/output_test.py +++ b/misc/output_test.py @@ -112,6 +112,19 @@ def test_issue_1214(self): \x1b[31mred\x1b[0m ''') + def test_issue_1966(self): + self.assertEqual(run( +'''rule cat + command = cat $rspfile $rspfile > $out + rspfile = cat.rsp + rspfile_content = a b c + +build a: cat +''', '-j3'), +'''[1/1] cat cat.rsp cat.rsp > a\x1b[K +''') + + def test_pr_1685(self): # Running those tools without .ninja_deps and .ninja_log shouldn't fail. self.assertEqual(run('', flags='-t recompact'), '') diff --git a/src/graph.cc b/src/graph.cc index 95fc1dc1b5..199294d481 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -392,7 +392,7 @@ struct EdgeEnv : public Env { std::string MakePathList(const Node* const* span, size_t size, char sep) const; private: - vector lookups_; + std::vector lookups_; const Edge* const edge_; EscapeKind escape_in_out_; bool recursive_; @@ -409,10 +409,43 @@ string EdgeEnv::LookupVariable(const string& var) { return MakePathList(&edge_->outputs_[0], explicit_outs_count, ' '); } + // Technical note about the lookups_ vector. + // + // This is used to detect cycles during recursive variable expansion + // which can be seen as a graph traversal problem. Consider the following + // example: + // + // rule something + // command = $foo $foo $var1 + // var1 = $var2 + // var2 = $var3 + // var3 = $var1 + // foo = FOO + // + // Each variable definition can be seen as a node in a graph that looks + // like the following: + // + // command --> foo + // | + // v + // var1 <-----. + // | | + // v | + // var2 ---> var3 + // + // The lookups_ vector is used as a stack of visited nodes/variables + // during recursive expansion. Entering a node adds an item to the + // stack, leaving the node removes it. 
+ // + // The recursive_ flag is used as a small performance optimization + // to never record the starting node in the stack when beginning a new + // expansion, since in most cases, expansions are not recursive + // at all. + // if (recursive_) { - vector::const_iterator it; - if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) { - string cycle; + auto it = std::find(lookups_.begin(), lookups_.end(), var); + if (it != lookups_.end()) { + std::string cycle; for (; it != lookups_.end(); ++it) cycle.append(*it + " -> "); cycle.append(var); @@ -422,13 +455,17 @@ string EdgeEnv::LookupVariable(const string& var) { // See notes on BindingEnv::LookupWithFallback. const EvalString* eval = edge_->rule_->GetBinding(var); - if (recursive_ && eval) + bool record_varname = recursive_ && eval; + if (record_varname) lookups_.push_back(var); // In practice, variables defined on rules never use another rule variable. // For performance, only start checking for cycles after the first lookup. recursive_ = true; - return edge_->env_->LookupWithFallback(var, eval, this); + std::string result = edge_->env_->LookupWithFallback(var, eval, this); + if (record_varname) + lookups_.pop_back(); + return result; } std::string EdgeEnv::MakePathList(const Node* const* const span, From 3d1b382f626ef72187adbc68ad4a5c64afd5f513 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Wed, 10 May 2023 18:13:09 +0200 Subject: [PATCH 076/127] CMakeLists: Fix ppoll() feature detection. Use check_cxx_symbol_exists() instead of check_symbol_exists() for the subtle reason explained in the comment added in this patch. 
Change-Id: Id949b8f2c7af39eeef3a848d3bdbc7639432f38a --- CMakeLists.txt | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fd32ab10bf..ac62a490b3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,8 +40,16 @@ else() if(NOT NINJA_FORCE_PSELECT) # Check whether ppoll() is usable on the target platform. # Set -DUSE_PPOLL=1 if this is the case. - include(CheckSymbolExists) - check_symbol_exists(ppoll poll.h HAVE_PPOLL) + # + # NOTE: Use check_cxx_symbol_exists() instead of check_symbol_exists() + # because on Linux, only exposes the symbol when _GNU_SOURCE + # is defined. + # + # Both g++ and clang++ define the symbol by default, because the C++ + # standard library headers require it, but *not* gcc and clang, which + # are used by check_symbol_exists(). + include(CheckCXXSymbolExists) + check_cxx_symbol_exists(ppoll poll.h HAVE_PPOLL) if(HAVE_PPOLL) add_compile_definitions(USE_PPOLL=1) endif() From dd8f62c9088f94a4081f84cac0d05beb695f88f7 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Fri, 16 Jun 2023 00:23:47 +0200 Subject: [PATCH 077/127] metrics: use chrono to convert ticks to micros This was previously causing undefined behavior, as multiplying dt by 1000000 overflowed on some systems. Fixes #2301. --- src/metrics.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/metrics.cc b/src/metrics.cc index 9a4dd12469..632ae43c50 100644 --- a/src/metrics.cc +++ b/src/metrics.cc @@ -48,12 +48,17 @@ constexpr int64_t GetFrequency() { int64_t TimerToMicros(int64_t dt) { // dt is in ticks. We want microseconds. - return (dt * 1000000) / GetFrequency(); + return chrono::duration_cast( + std::chrono::steady_clock::duration{ dt }) + .count(); } int64_t TimerToMicros(double dt) { // dt is in ticks. We want microseconds. 
- return (dt * 1000000) / GetFrequency(); + using DoubleSteadyClock = + std::chrono::duration; + return chrono::duration_cast(DoubleSteadyClock{ dt }) + .count(); } } // anonymous namespace From 67834978a6abdfb790dac165b8b1f1c93648e624 Mon Sep 17 00:00:00 2001 From: Alexandru Croitor Date: Mon, 31 Jul 2023 16:22:06 +0200 Subject: [PATCH 078/127] Fix browse to work with Python 3.11+ Python sources should not contain null bytes, so don't pass the final string null terminator character to the python interpreter. Fixes: #2310 --- src/browse.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/browse.cc b/src/browse.cc index 76bee070d8..ac54207800 100644 --- a/src/browse.cc +++ b/src/browse.cc @@ -71,8 +71,13 @@ void RunBrowsePython(State* state, const char* ninja_command, close(pipefd[0]); // Write the script file into the stdin of the Python process. - ssize_t len = write(pipefd[1], kBrowsePy, sizeof(kBrowsePy)); - if (len < (ssize_t)sizeof(kBrowsePy)) + // Only write n - 1 bytes, because Python 3.11 does not allow null + // bytes in source code anymore, so avoid writing the null string + // terminator. 
+ // See https://github.com/python/cpython/issues/96670 + auto kBrowsePyLength = sizeof(kBrowsePy) - 1; + ssize_t len = write(pipefd[1], kBrowsePy, kBrowsePyLength); + if (len < (ssize_t)kBrowsePyLength) perror("ninja: write"); close(pipefd[1]); exit(0); From 98bb926065926133278fa5dafd31bfc5007edf96 Mon Sep 17 00:00:00 2001 From: gerioldman Date: Sun, 27 Aug 2023 14:02:50 +0200 Subject: [PATCH 079/127] AddTasking RSP syntax to printCompdb functionality --- src/ninja.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/ninja.cc b/src/ninja.cc index 887d89f8d8..2520f31174 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -882,7 +882,7 @@ std::string EvaluateCommandWithRspfile(const Edge* edge, return command; size_t index = command.find(rspfile); - if (index == 0 || index == string::npos || command[index - 1] != '@') + if (index == 0 || index == string::npos || ( command[index - 1] != '@' && command.find("--option-file=") != index - 14 && command.find("-f ") != index - 3 )) return command; string rspfile_content = edge->GetBinding("rspfile_content"); @@ -892,7 +892,12 @@ std::string EvaluateCommandWithRspfile(const Edge* edge, rspfile_content.replace(newline_index, 1, 1, ' '); ++newline_index; } - command.replace(index - 1, rspfile.length() + 1, rspfile_content); + if (command[index - 1] == '@') + command.replace(index - 1, rspfile.length() + 1, rspfile_content); + else if (command.find("-f ") == index - 3) + command.replace(index - 3, rspfile.length() + 3, rspfile_content); + else + command.replace(index - 14, rspfile.length() + 14, rspfile_content); return command; } From 2dfafabc148e3d35f651a9a4b59708039a53c2ad Mon Sep 17 00:00:00 2001 From: gerioldman Date: Wed, 6 Sep 2023 16:19:07 +0200 Subject: [PATCH 080/127] Correct formatting --- src/ninja.cc | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/ninja.cc b/src/ninja.cc index 2520f31174..39672c3d1c 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -882,7 
+882,10 @@ std::string EvaluateCommandWithRspfile(const Edge* edge, return command; size_t index = command.find(rspfile); - if (index == 0 || index == string::npos || ( command[index - 1] != '@' && command.find("--option-file=") != index - 14 && command.find("-f ") != index - 3 )) + if (index == 0 || index == string::npos || + (command[index - 1] != '@' && + command.find("--option-file=") != index - 14 && + command.find("-f ") != index - 3)) return command; string rspfile_content = edge->GetBinding("rspfile_content"); @@ -892,12 +895,13 @@ std::string EvaluateCommandWithRspfile(const Edge* edge, rspfile_content.replace(newline_index, 1, 1, ' '); ++newline_index; } - if (command[index - 1] == '@') + if (command[index - 1] == '@') { command.replace(index - 1, rspfile.length() + 1, rspfile_content); - else if (command.find("-f ") == index - 3) + } else if (command.find("-f ") == index - 3) { command.replace(index - 3, rspfile.length() + 3, rspfile_content); - else + } else { // --option-file syntax command.replace(index - 14, rspfile.length() + 14, rspfile_content); + } return command; } From 203f098d267237eeecb651b74971059e5088ea08 Mon Sep 17 00:00:00 2001 From: Daniel Brondani Date: Thu, 7 Sep 2023 17:44:32 +0200 Subject: [PATCH 081/127] Fix check of filename length --- CMakeLists.txt | 3 ++- src/disk_interface.cc | 23 ++++++++++++++++++++++- src/disk_interface.h | 14 +++++++++----- src/disk_interface_test.cc | 19 +++++++++++++++++++ 4 files changed, 52 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ac62a490b3..d0c62f9005 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -246,7 +246,8 @@ if(BUILD_TESTING) src/util_test.cc ) if(WIN32) - target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc) + target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc + windows/ninja.manifest) endif() target_link_libraries(ninja_test PRIVATE libninja libninja-re2c) diff --git 
a/src/disk_interface.cc b/src/disk_interface.cc index 1157463432..ed064e1555 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -26,6 +26,7 @@ #include #include #include // _mkdir +#include #else #include #endif @@ -157,13 +158,27 @@ bool DiskInterface::MakeDirs(const string& path) { } // RealDiskInterface ----------------------------------------------------------- +RealDiskInterface::RealDiskInterface() +#ifdef _WIN32 +: use_cache_(false), long_paths_enabled_(false) { + setlocale(LC_ALL, ""); + IFDYNAMICGETCACHEDFUNCTIONTYPEDEF(L"ntdll", BOOLEAN(WINAPI*)(), + "RtlAreLongPathsEnabled", + RtlAreLongPathsEnabled) { + long_paths_enabled_ = RtlAreLongPathsEnabled(); + } +} +#else +{} +#endif TimeStamp RealDiskInterface::Stat(const string& path, string* err) const { METRIC_RECORD("node stat"); #ifdef _WIN32 // MSDN: "Naming Files, Paths, and Namespaces" // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx - if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) { + if (!path.empty() && !AreLongPathsEnabled() && path[0] != '\\' && + path.size() > MAX_PATH) { ostringstream err_stream; err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH << " characters"; @@ -333,3 +348,9 @@ void RealDiskInterface::AllowStatCache(bool allow) { cache_.clear(); #endif } + +#ifdef _WIN32 +bool RealDiskInterface::AreLongPathsEnabled(void) const { + return long_paths_enabled_; +} +#endif diff --git a/src/disk_interface.h b/src/disk_interface.h index bc29ab78ec..74200b8f5b 100644 --- a/src/disk_interface.h +++ b/src/disk_interface.h @@ -69,11 +69,7 @@ struct DiskInterface: public FileReader { /// Implementation of DiskInterface that actually hits the disk. 
struct RealDiskInterface : public DiskInterface { - RealDiskInterface() -#ifdef _WIN32 - : use_cache_(false) -#endif - {} + RealDiskInterface(); virtual ~RealDiskInterface() {} virtual TimeStamp Stat(const std::string& path, std::string* err) const; virtual bool MakeDir(const std::string& path); @@ -85,11 +81,19 @@ struct RealDiskInterface : public DiskInterface { /// Whether stat information can be cached. Only has an effect on Windows. void AllowStatCache(bool allow); +#ifdef _WIN32 + /// Whether long paths are enabled. Only has an effect on Windows. + bool AreLongPathsEnabled() const; +#endif + private: #ifdef _WIN32 /// Whether stat information can be cached. bool use_cache_; + /// Whether long paths are enabled. + bool long_paths_enabled_; + typedef std::map DirCache; // TODO: Neither a map nor a hashmap seems ideal here. If the statcache // works out, come up with a better data structure. diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc index 294df72ea7..e8d869c871 100644 --- a/src/disk_interface_test.cc +++ b/src/disk_interface_test.cc @@ -17,6 +17,7 @@ #ifdef _WIN32 #include #include +#include #endif #include "disk_interface.h" @@ -96,6 +97,24 @@ TEST_F(DiskInterfaceTest, StatExistingFile) { EXPECT_EQ("", err); } +#ifdef _WIN32 +TEST_F(DiskInterfaceTest, StatExistingFileWithLongPath) { + string err; + char currentdir[32767]; + _getcwd(currentdir, sizeof(currentdir)); + const string filename = string(currentdir) + +"\\filename_with_256_characters_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ +xxxxxxxxxxxxxxxxxxxxx"; + const string prefixed = "\\\\?\\" + filename; + ASSERT_TRUE(Touch(prefixed.c_str())); + EXPECT_GT(disk_.Stat(disk_.AreLongPathsEnabled() ? 
+ filename : prefixed, &err), 1); + EXPECT_EQ("", err); +} +#endif + TEST_F(DiskInterfaceTest, StatExistingDir) { string err; ASSERT_TRUE(disk_.MakeDir("subdir")); From eddafdd7e733241f92900035953f6bcf7c7677e3 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Wed, 20 Sep 2023 05:50:37 +0200 Subject: [PATCH 082/127] Fix Mingw cross-compilation. The previous pull request broke compilation with the Mingw toolchain, which does not provide . Moreover, this massive header was only used to expand a macro that generates a static volatile variable under the hood (which is not necessary here). Replace it with with a simpler and more portable code path to fix the build. --- src/disk_interface.cc | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/disk_interface.cc b/src/disk_interface.cc index ed064e1555..0f27e9da44 100644 --- a/src/disk_interface.cc +++ b/src/disk_interface.cc @@ -23,10 +23,10 @@ #include #ifdef _WIN32 -#include -#include #include // _mkdir -#include +#include + +#include #else #include #endif @@ -162,10 +162,16 @@ RealDiskInterface::RealDiskInterface() #ifdef _WIN32 : use_cache_(false), long_paths_enabled_(false) { setlocale(LC_ALL, ""); - IFDYNAMICGETCACHEDFUNCTIONTYPEDEF(L"ntdll", BOOLEAN(WINAPI*)(), - "RtlAreLongPathsEnabled", - RtlAreLongPathsEnabled) { - long_paths_enabled_ = RtlAreLongPathsEnabled(); + + // Probe ntdll.dll for RtlAreLongPathsEnabled, and call it if it exists. + HINSTANCE ntdll_lib = ::GetModuleHandleW(L"ntdll"); + if (ntdll_lib) { + typedef BOOLEAN(WINAPI FunctionType)(); + auto* func_ptr = reinterpret_cast( + ::GetProcAddress(ntdll_lib, "RtlAreLongPathsEnabled")); + if (func_ptr) { + long_paths_enabled_ = (*func_ptr)(); + } } } #else From 7289d5c2561f5789bec9c4f0114b73eb0e7476ee Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Thu, 12 May 2022 18:18:16 +0200 Subject: [PATCH 083/127] Fix .ninja parse time reported by `-d stats`. 
Because Parser::Load() is called recursively during Ninja manifest parsing, the call to METRIC_RECORD() in this function used to over-count the total parsing time (for example, by a factor of 2 for the Fuchsia build). This fixes the problem by introducing a new RECORD_METRIC_IF() macro, which only records anything if a given condition is true. This ensures that metric collection only starts and stops with the outer Parser::Load() call, and none of its recursive sub-calls. The effect on the output of `-d stats`is, for a Fuchsia build plan where `ninja -d stats nothing` takes a bit more than 5s: BEFORE: metric count avg (us) total (ms) .ninja parse 27304 372.6 10172.2 AFTER: metric count avg (us) total (ms) .ninja parse 1 4165297.0 4165.3 Note that |count| went to 1, since there is only one top-level Parser::Load() operation in this build. It would be more if dyndeps files were loaded, which does not happen in this build plan. --- src/metrics.h | 7 +++++++ src/parser.cc | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/metrics.h b/src/metrics.h index c9ba2366af..937d905de7 100644 --- a/src/metrics.h +++ b/src/metrics.h @@ -85,6 +85,13 @@ struct Stopwatch { g_metrics ? g_metrics->NewMetric(name) : NULL; \ ScopedMetric metrics_h_scoped(metrics_h_metric); +/// A variant of METRIC_RECORD that doesn't record anything if |condition| +/// is false. +#define METRIC_RECORD_IF(name, condition) \ + static Metric* metrics_h_metric = \ + g_metrics ? g_metrics->NewMetric(name) : NULL; \ + ScopedMetric metrics_h_scoped((condition) ? 
metrics_h_metric : NULL); + extern Metrics* g_metrics; #endif // NINJA_METRICS_H_ diff --git a/src/parser.cc b/src/parser.cc index 5f303c557c..139a347ac3 100644 --- a/src/parser.cc +++ b/src/parser.cc @@ -20,7 +20,10 @@ using namespace std; bool Parser::Load(const string& filename, string* err, Lexer* parent) { - METRIC_RECORD(".ninja parse"); + // If |parent| is not NULL, metrics collection has been started by a parent + // Parser::Load() in our call stack. Do not start a new one here to avoid + // over-counting parsing times. + METRIC_RECORD_IF(".ninja parse", parent == NULL); string contents; string read_err; if (file_reader_->ReadFile(filename, &contents, &read_err) != From cff9dce3cd2187a7671b05d2437a426ca6e06ca0 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Mon, 30 May 2022 23:04:46 +0200 Subject: [PATCH 084/127] Don't double-count the 'node stat' metric. This is already done in RealDiskInterface::Stat() itself, and removes a confusing duplicate line in `-d stats` output, e.g.: BEFORE: metric count avg (us) total (ms) ... node stat 119145 3.0 355.2 node stat 270673 3.1 834.9 AFTER: metric count avg (us) total (ms) ... node stat 270673 2.9 774.0 --- src/graph.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/graph.cc b/src/graph.cc index 199294d481..ad7031f92e 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -32,7 +32,6 @@ using namespace std; bool Node::Stat(DiskInterface* disk_interface, string* err) { - METRIC_RECORD("node stat"); mtime_ = disk_interface->Stat(path_, err); if (mtime_ == -1) { return false; From 0eb48440c4039f3b03f053c9622cd8ba4848ac47 Mon Sep 17 00:00:00 2001 From: Ewout ter Hoeven Date: Fri, 6 Oct 2023 11:14:45 +0200 Subject: [PATCH 085/127] Add Dependabot configuration for GitHub Actions updates Add a Dependabot configuration that checks once a week if the GitHub Actions are still using the latest version. If not, it opens a PR to update them. 
It will open few PRs, since only major versions are specified (like v3), so only on a major release (like v4) it will update and open a PR. This way it helps actively keep GitHub Actions workflows up to date and secure, while minimizing maintenance burden. See https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot --- .github/dependabot.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..6fddca0d6e --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" From f030a26688d9831b64b9ec4afc8f57c481adbc80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:27:20 +0000 Subject: [PATCH 086/127] Bump actions/upload-artifact from 1 to 3 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 1 to 3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v1...v3) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux.yml | 4 ++-- .github/workflows/macos.yml | 2 +- .github/workflows/windows.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 57a569e352..f158d1a1ef 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -58,7 +58,7 @@ jobs: # Upload ninja binary archive as an artifact - name: Upload artifact - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ninja-binary-archives path: artifact @@ -192,7 +192,7 @@ jobs: # Upload ninja binary archive as an artifact - name: Upload artifact - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ninja-binary-archives path: artifact diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 96b7cf17ff..5a230ae748 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -36,7 +36,7 @@ jobs: # Upload ninja binary archive as an artifact - name: Upload artifact - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ninja-binary-archives path: artifact diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b6ec2ac32f..08bb3478b9 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -50,7 +50,7 @@ jobs: # Upload ninja binary archive as an artifact - name: Upload artifact - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: ninja-binary-archives path: artifact From 22e94dad2f560ad760f28bdf88b8bcc6a8250bc0 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Fri, 6 Oct 2023 16:47:03 +0200 Subject: [PATCH 087/127] Remove MSVC build from AppVeyor We have GitHub Actions now --- appveyor.yml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index f0b92b8e78..7c39abab87 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -8,7 +8,6 @@ 
environment: CHERE_INVOKING: 1 # Tell Bash to inherit the current working directory matrix: - MSYSTEM: MINGW64 - - MSYSTEM: MSVC - MSYSTEM: LINUX matrix: @@ -17,8 +16,6 @@ matrix: MSYSTEM: LINUX - image: Ubuntu1804 MSYSTEM: MINGW64 - - image: Ubuntu1804 - MSYSTEM: MSVC for: - @@ -32,22 +29,6 @@ for: ./ninja all\n ./ninja_test 2>&1\n ./misc/ninja_syntax_test.py 2>&1\n\"@" - - - matrix: - only: - - MSYSTEM: MSVC - build_script: - - cmd: >- - call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat" - - python configure.py --bootstrap - - ninja.bootstrap.exe all - - ninja_test - - python misc/ninja_syntax_test.py - - matrix: only: - image: Ubuntu1804 From eff5b4439f1637cbfaba62c76dce74e164748311 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Tue, 12 Sep 2023 11:40:02 +0200 Subject: [PATCH 088/127] Ensure tests do not leave stale files in current directory. This patch fixes a minor but annoying issue during development where some tests would leave stale files in the current directory. + Introduce new ScopedFilePath class to perform remove-on-scope-exit of a given file path. 
Fixes #1583 --- src/build_test.cc | 96 ++++++++++++++++++++++------------------ src/missing_deps_test.cc | 6 +++ src/test.cc | 26 +++++++++++ src/test.h | 27 +++++++++++ 4 files changed, 113 insertions(+), 42 deletions(-) diff --git a/src/build_test.cc b/src/build_test.cc index 3908761057..d32ad3e4c6 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -2228,8 +2228,8 @@ TEST_F(BuildTest, FailedDepsParse) { } struct BuildWithQueryDepsLogTest : public BuildTest { - BuildWithQueryDepsLogTest() : BuildTest(&log_) { - } + BuildWithQueryDepsLogTest() + : BuildTest(&log_), deps_log_file_("ninja_deps") {} ~BuildWithQueryDepsLogTest() { log_.Close(); @@ -2241,12 +2241,13 @@ struct BuildWithQueryDepsLogTest : public BuildTest { temp_dir_.CreateAndEnter("BuildWithQueryDepsLogTest"); std::string err; - ASSERT_TRUE(log_.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(log_.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); } ScopedTempDir temp_dir_; + ScopedFilePath deps_log_file_; DepsLog log_; }; @@ -2440,7 +2441,8 @@ TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOnlySecondaryOutput) { /// builder_ it sets up, because we want pristine objects for /// each build. struct BuildWithDepsLogTest : public BuildTest { - BuildWithDepsLogTest() {} + BuildWithDepsLogTest() + : build_log_file_("build_log"), deps_log_file_("ninja_deps") {} virtual void SetUp() { BuildTest::SetUp(); @@ -2453,6 +2455,8 @@ struct BuildWithDepsLogTest : public BuildTest { } ScopedTempDir temp_dir_; + ScopedFilePath build_log_file_; + ScopedFilePath deps_log_file_; /// Shadow parent class builder_ so we don't accidentally use it. void* builder_; @@ -2466,6 +2470,7 @@ TEST_F(BuildWithDepsLogTest, Straightforward) { "build out: cat in1\n" " deps = gcc\n" " depfile = in1.d\n"; + { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); @@ -2473,7 +2478,7 @@ TEST_F(BuildWithDepsLogTest, Straightforward) { // Run the build once, everything should be ok. 
DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -2503,8 +2508,8 @@ TEST_F(BuildWithDepsLogTest, Straightforward) { // Run the build again. DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); @@ -2544,7 +2549,7 @@ TEST_F(BuildWithDepsLogTest, ObsoleteDeps) { // Run the build once, everything should be ok. DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -2573,8 +2578,8 @@ TEST_F(BuildWithDepsLogTest, ObsoleteDeps) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); @@ -2638,12 +2643,12 @@ TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceCondition) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); BuildLog build_log; - ASSERT_TRUE(build_log.Load("build_log", &err)); - ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err)); + ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err)); + ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err)); DepsLog 
deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); BuildLog::LogEntry* log_entry = NULL; { @@ -2720,12 +2725,12 @@ TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceConditionWithDepFile) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); BuildLog build_log; - ASSERT_TRUE(build_log.Load("build_log", &err)); - ASSERT_TRUE(build_log.OpenForWrite("build_log", *this, &err)); + ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err)); + ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); { Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); @@ -2871,7 +2876,7 @@ TEST_F(BuildWithDepsLogTest, RestatDepfileDependencyDepsLog) { // Run the build once, everything should be ok. DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -2897,8 +2902,8 @@ TEST_F(BuildWithDepsLogTest, RestatDepfileDependencyDepsLog) { // Run the build again. 
DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); @@ -2930,7 +2935,7 @@ TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) { // Run the build once, everything should be ok. DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -2950,8 +2955,8 @@ TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -3001,7 +3006,7 @@ TEST_F(BuildWithDepsLogTest, DiscoveredDepDuringBuildChanged) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); @@ -3024,8 +3029,8 @@ TEST_F(BuildWithDepsLogTest, DiscoveredDepDuringBuildChanged) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + 
ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); @@ -3047,8 +3052,8 @@ TEST_F(BuildWithDepsLogTest, DiscoveredDepDuringBuildChanged) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); @@ -3076,7 +3081,7 @@ TEST_F(BuildWithDepsLogTest, DepFileDepsLogCanonicalize) { // Run the build once, everything should be ok. DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -3098,8 +3103,8 @@ TEST_F(BuildWithDepsLogTest, DepFileDepsLogCanonicalize) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -3169,11 +3174,13 @@ TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) { fs_.Create("out.d", "out: header.h"); fs_.Create("header.h", ""); - RebuildTarget("out", manifest, "build_log", "ninja_deps"); + RebuildTarget("out", manifest, build_log_file_.c_str(), + deps_log_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // Sanity: this rebuild should be NOOP - RebuildTarget("out", manifest, "build_log", "ninja_deps"); + RebuildTarget("out", 
manifest, build_log_file_.c_str(), + deps_log_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); // Touch 'header.in', blank dependencies log (create a different one). @@ -3182,12 +3189,14 @@ TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) { fs_.Tick(); fs_.Create("header.in", ""); + ScopedFilePath deps2_file_("ninja_deps2"); + // (switch to a new blank deps_log "ninja_deps2") - RebuildTarget("out", manifest, "build_log", "ninja_deps2"); + RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // Sanity: this build should be NOOP - RebuildTarget("out", manifest, "build_log", "ninja_deps2"); + RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); // Check that invalidating deps by target timestamp also works here @@ -3195,11 +3204,11 @@ TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) { fs_.Tick(); fs_.Create("header.in", ""); fs_.Create("out", ""); - RebuildTarget("out", manifest, "build_log", "ninja_deps2"); + RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // And this build should be NOOP again - RebuildTarget("out", manifest, "build_log", "ninja_deps2"); + RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); } @@ -3216,7 +3225,10 @@ TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) { fs_.Create("header.h", ""); fs_.Create("foo.o.d", "bar.o.d: header.h\n"); - RebuildTarget("foo.o", manifest, "build_log", "ninja_deps"); + ScopedFilePath build_log("build_log"); + ScopedFilePath deps_file("ninja_deps"); + + RebuildTarget("foo.o", manifest, build_log.c_str(), deps_file.c_str()); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } @@ -4173,7 +4185,7 @@ TEST_F(BuildWithDepsLogTest, ValidationThroughDepfile) { 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); @@ -4208,8 +4220,8 @@ TEST_F(BuildWithDepsLogTest, ValidationThroughDepfile) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; - ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err)); - ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err)); + ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); + ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); diff --git a/src/missing_deps_test.cc b/src/missing_deps_test.cc index db66885d07..95035d0f7e 100644 --- a/src/missing_deps_test.cc +++ b/src/missing_deps_test.cc @@ -36,6 +36,11 @@ struct MissingDependencyScannerTest : public testing::Test { ASSERT_EQ("", err); } + ~MissingDependencyScannerTest() { + // Remove test file. 
+ deps_log_.Close(); + } + MissingDependencyScanner& scanner() { return scanner_; } void RecordDepsLogDep(const std::string& from, const std::string& to) { @@ -79,6 +84,7 @@ struct MissingDependencyScannerTest : public testing::Test { ASSERT_EQ(1u, scanner().generator_rules_.count(rule)); } + ScopedFilePath scoped_file_path_ = kTestDepsLogFilename; MissingDependencyTestDelegate delegate_; Rule generator_rule_; Rule compile_rule_; diff --git a/src/test.cc b/src/test.cc index 11b1c9ebf0..4d063da96e 100644 --- a/src/test.cc +++ b/src/test.cc @@ -235,3 +235,29 @@ void ScopedTempDir::Cleanup() { temp_dir_name_.clear(); } + +ScopedFilePath::ScopedFilePath(ScopedFilePath&& other) noexcept + : path_(std::move(other.path_)), released_(other.released_) { + other.released_ = true; +} + +/// It would be nice to use '= default' here instead but some old compilers +/// such as GCC from Ubuntu 16.06 will not compile it with "noexcept", so just +/// write it manually. +ScopedFilePath& ScopedFilePath::operator=(ScopedFilePath&& other) noexcept { + if (this != &other) { + this->~ScopedFilePath(); + new (this) ScopedFilePath(std::move(other)); + } + return *this; +} + +ScopedFilePath::~ScopedFilePath() { + if (!released_) { + unlink(path_.c_str()); + } +} + +void ScopedFilePath::Release() { + released_ = true; +} diff --git a/src/test.h b/src/test.h index 4552c34c88..238cb96b6d 100644 --- a/src/test.h +++ b/src/test.h @@ -182,4 +182,31 @@ struct ScopedTempDir { std::string temp_dir_name_; }; +/// A class that records a file path and ensures that it is removed +/// on destruction. This ensures that tests do not keep stale files in the +/// current directory where they run, even in case of assertion failure. +struct ScopedFilePath { + /// Constructor just records the file path. + ScopedFilePath(const std::string& path) : path_(path) {} + ScopedFilePath(const char* path) : path_(path) {} + + /// Allow move operations. 
+ ScopedFilePath(ScopedFilePath&&) noexcept; + ScopedFilePath& operator=(ScopedFilePath&&) noexcept; + + /// Destructor destroys the file, unless Release() was called. + ~ScopedFilePath(); + + /// Release the file, the destructor will not remove the file. + void Release(); + + const char* c_str() const { return path_.c_str(); } + const std::string& path() const { return path_; } + bool released() const { return released_; } + + private: + std::string path_; + bool released_ = false; +}; + #endif // NINJA_TEST_H_ From 9cf13cd1ecb7ae649394f4133d121a01e191560b Mon Sep 17 00:00:00 2001 From: Byoungchan Lee Date: Mon, 9 Oct 2023 20:13:20 +0900 Subject: [PATCH 089/127] Replace pipes.quote with shlex.quote in configure.py Python 3.12 deprecated the pipes module and it will be removed in Python 3.13. In configure.py, I have replaced the usage of pipes.quote with shlex.quote, which is the exactly same function as pipes.quote. For more details, refer to PEP 0594: https://peps.python.org/pep-0594 --- configure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.py b/configure.py index 588250aa8a..c6973cd1a5 100755 --- a/configure.py +++ b/configure.py @@ -21,7 +21,7 @@ from optparse import OptionParser import os -import pipes +import shlex import string import subprocess import sys @@ -262,7 +262,7 @@ def _run_command(self, cmdline): env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']) configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys) if configure_env: - config_str = ' '.join([k + '=' + pipes.quote(configure_env[k]) + config_str = ' '.join([k + '=' + shlex.quote(configure_env[k]) for k in configure_env]) n.variable('configure_env', config_str + '$ ') n.newline() From 0a9c9c5f50c60de4a7acfed8aaa048c74cd2f43b Mon Sep 17 00:00:00 2001 From: Byoungchan Lee Date: Mon, 9 Oct 2023 20:13:50 +0900 Subject: [PATCH 090/127] Remove unused module string in configure.py --- configure.py | 1 - 1 file changed, 1 deletion(-) 
diff --git a/configure.py index c6973cd1a5..939153df60 100755 --- a/configure.py +++ b/configure.py @@ -22,7 +22,6 @@ from optparse import OptionParser import os import shlex -import string import subprocess import sys From 44a402f9c4912aea880727dfa3e110513042e471 Mon Sep 17 00:00:00 2001 From: Roman Lebedev Date: Mon, 26 Apr 2021 15:48:36 +0300 Subject: [PATCH 091/127] Reliable ETA and progress percentage. This has been bugging me for *years*. :) Count of finished edges isn't a great statistic, it isn't really obvious if LLVM will take 8 minutes to build, or 10 minutes. But, it's actually pretty straight-forward to get some more useful information. We already know how much time each edge has taken, so we could just do the dumb thing, and assume that every edge in the plan takes the same amount of time. Or, we can do better. `.ninja_log` already contains the historical data on how long each edge took to produce its outs, so we simply need to ensure that we populate edges with that info, and then we can greatly improve our predictions. The math is pretty simple, I think. This is largely a port of a similar change I did to LLVM LIT: https://reviews.llvm.org/D99073 With this, I get something quite lovely: ``` llvm-project/build-Clang12$ NINJA_STATUS="[%f/%t %p %P][%w + %W] " /repositories/ninja/build-Clang-debug/ninja opt [288/2527 11% 4%][00:27 + 08:52] Building CXX object lib/DebugInfo/CodeView/CMakeFiles/LLVMDebugInfoCodeView.dir/AppendingTypeTableBuilder.cpp.o ``` I hope people will find this useful, and it could be merged.
--- doc/manual.asciidoc | 6 +- src/build.cc | 22 +++-- src/build_test.cc | 23 ++++- src/graph.h | 4 + src/ninja.cc | 19 ++++ src/status.cc | 210 +++++++++++++++++++++++++++++++++++++++++--- src/status.h | 47 ++++++++-- src/status_test.cc | 5 +- 8 files changed, 298 insertions(+), 38 deletions(-) diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc index 22601e11ce..d01b75a334 100644 --- a/doc/manual.asciidoc +++ b/doc/manual.asciidoc @@ -204,7 +204,11 @@ Several placeholders are available: `%o`:: Overall rate of finished edges per second `%c`:: Current rate of finished edges per second (average over builds specified by `-j` or its default) -`%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_ +`%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_ +`%E`:: Remaining time (ETA) in seconds. _(Available since Ninja 1.12.)_ +`%w`:: Elapsed time in [h:]mm:ss format. _(Available since Ninja 1.12.)_ +`%W`:: Remaining time (ETA) in [h:]mm:ss format. _(Available since Ninja 1.12.)_ +`%P`:: The percentage (in ppp% format) of time elapsed out of predicted total runtime. _(Available since Ninja 1.12.)_ `%%`:: A plain `%` character. 
The default progress status is `"[%f/%t] "` (note the trailing space diff --git a/src/build.cc b/src/build.cc index 76ff93af03..dcda6b81fe 100644 --- a/src/build.cc +++ b/src/build.cc @@ -144,8 +144,11 @@ bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err, void Plan::EdgeWanted(const Edge* edge) { ++wanted_edges_; - if (!edge->is_phony()) + if (!edge->is_phony()) { ++command_edges_; + if (builder_) + builder_->status_->EdgeAddedToPlan(edge); + } } Edge* Plan::FindWork() { @@ -294,8 +297,11 @@ bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) { want_e->second = kWantNothing; --wanted_edges_; - if (!(*oe)->is_phony()) + if (!(*oe)->is_phony()) { --command_edges_; + if (builder_) + builder_->status_->EdgeRemovedFromPlan(*oe); + } } } } @@ -607,7 +613,6 @@ bool Builder::AlreadyUpToDate() const { bool Builder::Build(string* err) { assert(!AlreadyUpToDate()); - status_->PlanHasTotalEdges(plan_.command_edge_count()); int pending_commands = 0; int failures_allowed = config_.failures_allowed; @@ -780,8 +785,8 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { end_time_millis = GetTimeMillis() - start_time_millis_; running_edges_.erase(it); - status_->BuildEdgeFinished(edge, end_time_millis, result->success(), - result->output); + status_->BuildEdgeFinished(edge, start_time_millis, end_time_millis, + result->success(), result->output); // The rest of this function only applies to successful commands. if (!result->success()) { @@ -821,10 +826,6 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) { } if (node_cleaned) { record_mtime = edge->command_start_time_; - - // The total number of edges in the plan may have changed as a result - // of a restat. 
- status_->PlanHasTotalEdges(plan_.command_edge_count()); } } @@ -938,8 +939,5 @@ bool Builder::LoadDyndeps(Node* node, string* err) { if (!plan_.DyndepsLoaded(&scan_, node, ddf, err)) return false; - // New command edges may have been added to the plan. - status_->PlanHasTotalEdges(plan_.command_edge_count()); - return true; } diff --git a/src/build_test.cc b/src/build_test.cc index 3908761057..04bdf4eece 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -2197,11 +2197,28 @@ TEST_F(BuildTest, DepsGccWithEmptyDepfileErrorsOut) { ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } -TEST_F(BuildTest, StatusFormatElapsed) { +TEST_F(BuildTest, StatusFormatElapsed_e) { status_.BuildStarted(); // Before any task is done, the elapsed time must be zero. - EXPECT_EQ("[%/e0.000]", - status_.FormatProgressStatus("[%%/e%e]", 0)); + EXPECT_EQ("[%/e0.000]", status_.FormatProgressStatus("[%%/e%e]", 0)); +} + +TEST_F(BuildTest, StatusFormatElapsed_w) { + status_.BuildStarted(); + // Before any task is done, the elapsed time must be zero. + EXPECT_EQ("[%/e00:00]", status_.FormatProgressStatus("[%%/e%w]", 0)); +} + +TEST_F(BuildTest, StatusFormatETA) { + status_.BuildStarted(); + // Before any task is done, the ETA time must be unknown. + EXPECT_EQ("[%/E?]", status_.FormatProgressStatus("[%%/E%E]", 0)); +} + +TEST_F(BuildTest, StatusFormatTimeProgress) { + status_.BuildStarted(); + // Before any task is done, the percentage of elapsed time must be zero. + EXPECT_EQ("[%/p 0%]", status_.FormatProgressStatus("[%%/p%p]", 0)); } TEST_F(BuildTest, StatusFormatReplacePlaceholder) { diff --git a/src/graph.h b/src/graph.h index d07a9b7639..511438cf0a 100644 --- a/src/graph.h +++ b/src/graph.h @@ -250,6 +250,10 @@ struct Edge { bool is_phony() const; bool use_console() const; bool maybe_phonycycle_diagnostic() const; + + // Historical info: how long did this edge take last time, + // as per .ninja_log, if known? Defaults to -1 if unknown. 
+ int64_t prev_elapsed_time_millis = -1; }; struct EdgeCmp { diff --git a/src/ninja.cc b/src/ninja.cc index 39672c3d1c..1a2c8ccf3c 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -156,6 +156,10 @@ struct NinjaMain : public BuildLogUser { /// @return true if the manifest was rebuilt. bool RebuildManifest(const char* input_file, string* err, Status* status); + /// For each edge, lookup in build log how long it took last time, + /// and record that in the edge itself. It will be used for ETA prediction. + void ParsePreviousElapsedTimes(); + /// Build the targets listed on the command line. /// @return an exit code. int RunBuild(int argc, char** argv, Status* status); @@ -289,6 +293,19 @@ bool NinjaMain::RebuildManifest(const char* input_file, string* err, return true; } +void NinjaMain::ParsePreviousElapsedTimes() { + for (Edge* edge : state_.edges_) { + for (Node* out : edge->outputs_) { + BuildLog::LogEntry* log_entry = build_log_.LookupByOutput(out->path()); + if (!log_entry) + continue; // Maybe we'll have log entry for next output of this edge? + edge->prev_elapsed_time_millis = + log_entry->end_time - log_entry->start_time; + break; // Onto next edge. 
+ } + } +} + Node* NinjaMain::CollectTarget(const char* cpath, string* err) { string path = cpath; if (path.empty()) { @@ -1598,6 +1615,8 @@ NORETURN void real_main(int argc, char** argv) { exit(1); } + ninja.ParsePreviousElapsedTimes(); + int result = ninja.RunBuild(argc, argv, status); if (g_metrics) ninja.DumpMetrics(); diff --git a/src/status.cc b/src/status.cc index 88b77815b3..06f3c20aae 100644 --- a/src/status.cc +++ b/src/status.cc @@ -14,6 +14,15 @@ #include "status.h" +#ifdef _WIN32 +#include "win32port.h" +#else +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include +#endif + #include #include @@ -27,11 +36,9 @@ using namespace std; StatusPrinter::StatusPrinter(const BuildConfig& config) - : config_(config), - started_edges_(0), finished_edges_(0), total_edges_(0), running_edges_(0), - time_millis_(0), progress_status_format_(NULL), + : config_(config), started_edges_(0), finished_edges_(0), total_edges_(0), + running_edges_(0), progress_status_format_(NULL), current_rate_(config.parallelism) { - // Don't do anything fancy in verbose mode. if (config_.verbosity != BuildConfig::NORMAL) printer_.set_smart_terminal(false); @@ -41,8 +48,32 @@ StatusPrinter::StatusPrinter(const BuildConfig& config) progress_status_format_ = "[%f/%t] "; } -void StatusPrinter::PlanHasTotalEdges(int total) { - total_edges_ = total; +void StatusPrinter::EdgeAddedToPlan(const Edge* edge) { + ++total_edges_; + + // Do we know how long did this edge take last time? + if (edge->prev_elapsed_time_millis != -1) { + ++eta_predictable_edges_total_; + ++eta_predictable_edges_remaining_; + eta_predictable_cpu_time_total_millis_ += edge->prev_elapsed_time_millis; + eta_predictable_cpu_time_remaining_millis_ += + edge->prev_elapsed_time_millis; + } else + ++eta_unpredictable_edges_remaining_; +} + +void StatusPrinter::EdgeRemovedFromPlan(const Edge* edge) { + --total_edges_; + + // Do we know how long did this edge take last time? 
+ if (edge->prev_elapsed_time_millis != -1) { + --eta_predictable_edges_total_; + --eta_predictable_edges_remaining_; + eta_predictable_cpu_time_total_millis_ -= edge->prev_elapsed_time_millis; + eta_predictable_cpu_time_remaining_millis_ -= + edge->prev_elapsed_time_millis; + } else + --eta_unpredictable_edges_remaining_; } void StatusPrinter::BuildEdgeStarted(const Edge* edge, @@ -58,11 +89,102 @@ void StatusPrinter::BuildEdgeStarted(const Edge* edge, printer_.SetConsoleLocked(true); } -void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t end_time_millis, - bool success, const string& output) { +void StatusPrinter::RecalculateProgressPrediction() { + time_predicted_percentage_ = 0.0; + + // Sometimes, the previous and actual times may be wildly different. + // For example, the previous build may have been fully recovered from ccache, + // so it was blazing fast, while the new build no longer gets hits from ccache + // for whatever reason, so it actually compiles code, which takes much longer. + // We should detect such cases, and avoid using "wrong" previous times. + + // Note that we will only use the previous times if there are edges with + // previous time knowledge remaining. + bool use_previous_times = eta_predictable_edges_remaining_ && + eta_predictable_cpu_time_remaining_millis_; + + // Iff we have sufficient statistical information for the current run, + // that is, if we have taken at least 15 sec AND finished at least 5% of edges, + // we can check whether our performance so far matches the previous one. + if (use_previous_times && total_edges_ && finished_edges_ && + (time_millis_ >= 15 * 1e3) && + (((double)finished_edges_ / total_edges_) >= 0.05)) { + // Over the edges we've just run, how long did they take on average? + double actual_average_cpu_time_millis = + (double)cpu_time_millis_ / finished_edges_; + // What is the previous average, for the edges with such knowledge? 
+ double previous_average_cpu_time_millis = + (double)eta_predictable_cpu_time_total_millis_ / + eta_predictable_edges_total_; + + double ratio = std::max(previous_average_cpu_time_millis, + actual_average_cpu_time_millis) / + std::min(previous_average_cpu_time_millis, + actual_average_cpu_time_millis); + + // Let's say that the average times should differ by less than 10x + use_previous_times = ratio < 10; + } + + int edges_with_known_runtime = finished_edges_; + if (use_previous_times) + edges_with_known_runtime += eta_predictable_edges_remaining_; + if (edges_with_known_runtime == 0) + return; + + int edges_with_unknown_runtime = use_previous_times + ? eta_unpredictable_edges_remaining_ + : (total_edges_ - finished_edges_); + + // Given the time elapsed on the edges we've just run, + // and the runtime of the edges for which we know previous runtime, + // what's the edge's average runtime? + int64_t edges_known_runtime_total_millis = cpu_time_millis_; + if (use_previous_times) + edges_known_runtime_total_millis += + eta_predictable_cpu_time_remaining_millis_; + + double average_cpu_time_millis = + (double)edges_known_runtime_total_millis / edges_with_known_runtime; + + // For the edges for which we do not have the previous runtime, + // let's assume that their average runtime is the same as for the other edges, + // and we therefore can predict their remaining runtime. + double unpredictable_cpu_time_remaining_millis = + average_cpu_time_millis * edges_with_unknown_runtime; + + // And therefore we can predict the remaining and total runtimes. + double total_cpu_time_remaining_millis = + unpredictable_cpu_time_remaining_millis; + if (use_previous_times) + total_cpu_time_remaining_millis += + eta_predictable_cpu_time_remaining_millis_; + double total_cpu_time_millis = + cpu_time_millis_ + total_cpu_time_remaining_millis; + if (total_cpu_time_millis == 0.0) + return; + + // After that we can tell how much work we've completed, in time units. 
+ time_predicted_percentage_ = cpu_time_millis_ / total_cpu_time_millis; +} + +void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t start_time_millis, + int64_t end_time_millis, bool success, + const string& output) { time_millis_ = end_time_millis; ++finished_edges_; + int64_t elapsed = end_time_millis - start_time_millis; + cpu_time_millis_ += elapsed; + + // Do we know how long did this edge take last time? + if (edge->prev_elapsed_time_millis != -1) { + --eta_predictable_edges_remaining_; + eta_predictable_cpu_time_remaining_millis_ -= + edge->prev_elapsed_time_millis; + } else + --eta_unpredictable_edges_remaining_; + if (edge->use_console()) printer_.SetConsoleLocked(false); @@ -201,16 +323,78 @@ string StatusPrinter::FormatProgressStatus(const char* progress_status_format, out += buf; break; - // Percentage + // Percentage of edges completed case 'p': { - int percent = (100 * finished_edges_) / total_edges_; + int percent = 0; + if (finished_edges_ != 0 && total_edges_ != 0) + percent = (100 * finished_edges_) / total_edges_; snprintf(buf, sizeof(buf), "%3i%%", percent); out += buf; break; } - case 'e': { - snprintf(buf, sizeof(buf), "%.3f", time_millis_ / 1e3); +#define FORMAT_TIME_HMMSS(t) \ + "%" PRId64 ":%02" PRId64 ":%02" PRId64 "", (t) / 3600, ((t) % 3600) / 60, \ + (t) % 60 +#define FORMAT_TIME_MMSS(t) "%02" PRId64 ":%02" PRId64 "", (t) / 60, (t) % 60 + + // Wall time + case 'e': // elapsed, seconds + case 'w': // elapsed, human-readable + case 'E': // ETA, seconds + case 'W': // ETA, human-readable + { + double elapsed_sec = time_millis_ / 1e3; + double eta_sec = -1; // To be printed as "?". + if (time_predicted_percentage_ != 0.0) { + // So, we know that we've spent time_millis_ wall clock, + // and that is time_predicted_percentage_ percent. + // How much time will we need to complete 100%? + double total_wall_time = time_millis_ / time_predicted_percentage_; + // Naturally, that gives us the time remaining. 
+ eta_sec = (total_wall_time - time_millis_) / 1e3; + } + + const bool print_with_hours = + elapsed_sec >= 60 * 60 || eta_sec >= 60 * 60; + + double sec = -1; + switch (*s) { + case 'e': // elapsed, seconds + case 'w': // elapsed, human-readable + sec = elapsed_sec; + break; + case 'E': // ETA, seconds + case 'W': // ETA, human-readable + sec = eta_sec; + break; + } + + if (sec < 0) + snprintf(buf, sizeof(buf), "?"); + else { + switch (*s) { + case 'e': // elapsed, seconds + case 'E': // ETA, seconds + snprintf(buf, sizeof(buf), "%.3f", sec); + break; + case 'w': // elapsed, human-readable + case 'W': // ETA, human-readable + if (print_with_hours) + snprintf(buf, sizeof(buf), FORMAT_TIME_HMMSS((int64_t)sec)); + else + snprintf(buf, sizeof(buf), FORMAT_TIME_MMSS((int64_t)sec)); + break; + } + } + out += buf; + break; + } + + // Percentage of time spent out of the predicted time total + case 'P': { + snprintf(buf, sizeof(buf), "%3i%%", + (int)(100. * time_predicted_percentage_)); out += buf; break; } @@ -232,6 +416,8 @@ void StatusPrinter::PrintStatus(const Edge* edge, int64_t time_millis) { || config_.verbosity == BuildConfig::NO_STATUS_UPDATE) return; + RecalculateProgressPrediction(); + bool force_full_command = config_.verbosity == BuildConfig::VERBOSE; string to_print = edge->GetBinding("description"); diff --git a/src/status.h b/src/status.h index b2e50ea1aa..a1a8fddeae 100644 --- a/src/status.h +++ b/src/status.h @@ -24,10 +24,13 @@ /// Abstract interface to object that tracks the status of a build: /// completion fraction, printing updates. 
struct Status { - virtual void PlanHasTotalEdges(int total) = 0; - virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) = 0; - virtual void BuildEdgeFinished(Edge* edge, int64_t end_time_millis, - bool success, const std::string& output) = 0; + virtual void EdgeAddedToPlan(const Edge* edge) = 0; + virtual void EdgeRemovedFromPlan(const Edge* edge) = 0; + virtual void BuildEdgeStarted(const Edge* edge, + int64_t start_time_millis) = 0; + virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis, + int64_t end_time_millis, bool success, + const std::string& output) = 0; virtual void BuildLoadDyndeps() = 0; virtual void BuildStarted() = 0; virtual void BuildFinished() = 0; @@ -43,10 +46,15 @@ struct Status { /// human-readable strings to stdout struct StatusPrinter : Status { explicit StatusPrinter(const BuildConfig& config); - virtual void PlanHasTotalEdges(int total); + + /// Callbacks for the Plan to notify us about adding/removing Edge's. + virtual void EdgeAddedToPlan(const Edge* edge); + virtual void EdgeRemovedFromPlan(const Edge* edge); + virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis); - virtual void BuildEdgeFinished(Edge* edge, int64_t end_time_millis, - bool success, const std::string& output); + virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis, + int64_t end_time_millis, bool success, + const std::string& output); virtual void BuildLoadDyndeps(); virtual void BuildStarted(); virtual void BuildFinished(); @@ -71,7 +79,30 @@ struct StatusPrinter : Status { const BuildConfig& config_; int started_edges_, finished_edges_, total_edges_, running_edges_; - int64_t time_millis_; + + /// How much wall clock elapsed so far? + int64_t time_millis_ = 0; + + /// How much cpu clock elapsed so far? + int64_t cpu_time_millis_ = 0; + + /// What percentage of predicted total time have elapsed already? 
+ double time_predicted_percentage_ = 0.0; + + /// Out of all the edges, for how many do we know previous time? + int eta_predictable_edges_total_ = 0; + /// And how much time did they all take? + int64_t eta_predictable_cpu_time_total_millis_ = 0; + + /// Out of all the non-finished edges, for how many do we know previous time? + int eta_predictable_edges_remaining_ = 0; + /// And how much time will they all take? + int64_t eta_predictable_cpu_time_remaining_millis_ = 0; + + /// For how many edges we don't know the previous run time? + int eta_unpredictable_edges_remaining_ = 0; + + void RecalculateProgressPrediction(); /// Prints progress output. LinePrinter printer_; diff --git a/src/status_test.cc b/src/status_test.cc index 6e42490ab1..411d3ed03d 100644 --- a/src/status_test.cc +++ b/src/status_test.cc @@ -22,8 +22,9 @@ TEST(StatusTest, StatusFormatElapsed) { status.BuildStarted(); // Before any task is done, the elapsed time must be zero. - EXPECT_EQ("[%/e0.000]", - status.FormatProgressStatus("[%%/e%e]", 0)); + EXPECT_EQ("[%/e0.000]", status.FormatProgressStatus("[%%/e%e]", 0)); + // Before any task is done, the elapsed time must be zero. 
+ EXPECT_EQ("[%/e00:00]", status.FormatProgressStatus("[%%/e%w]", 0)); } TEST(StatusTest, StatusFormatReplacePlaceholder) { From 2457e5b66c5bab8b6414370f6e37d4fb397dc574 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Thu, 12 Oct 2023 23:35:13 +0200 Subject: [PATCH 092/127] Update GoogleTest to last release to support C++11 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 65a17068a7..3017f33881 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -227,8 +227,8 @@ if(BUILD_TESTING) include(FetchContent) FetchContent_Declare( googletest - URL https://github.com/google/googletest/archive/release-1.10.0.tar.gz - URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770 + URL https://github.com/google/googletest/archive/release-1.12.1.tar.gz + URL_HASH SHA1=cdddd449d4e3aa7bd421d4519c17139ea1890fe7 ) FetchContent_MakeAvailable(googletest) endif() From 46dbd80db8f974ed186dc14b9864ca0baa689875 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Thu, 12 Oct 2023 23:38:14 +0200 Subject: [PATCH 093/127] GitHub Actions: configure.py no longer builds tests --- .github/workflows/linux.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f158d1a1ef..4a60653447 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -144,7 +144,6 @@ jobs: run: | python3 configure.py --bootstrap ./ninja all - ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots python3 misc/ninja_syntax_test.py ./misc/output_test.py From b777a12df2fdb5408d4734b0ea341f37af40c7f2 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Thu, 12 Oct 2023 23:53:04 +0200 Subject: [PATCH 094/127] Revert back to GoogleTest 1.10.0 for RHEL 7 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3017f33881..65a17068a7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -227,8 +227,8 @@ 
if(BUILD_TESTING) include(FetchContent) FetchContent_Declare( googletest - URL https://github.com/google/googletest/archive/release-1.12.1.tar.gz - URL_HASH SHA1=cdddd449d4e3aa7bd421d4519c17139ea1890fe7 + URL https://github.com/google/googletest/archive/release-1.10.0.tar.gz + URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770 ) FetchContent_MakeAvailable(googletest) endif() From d3f01d339f5566aab81368527b81140f9dcd61cb Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Fri, 13 Oct 2023 00:12:39 +0200 Subject: [PATCH 095/127] GitHub Actions: Install libgtest-dev for Ubuntu 20.04 --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4a60653447..4a17f0e084 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -83,7 +83,7 @@ jobs: - name: Install dependencies run: | apt update - apt install -y python3-pytest ninja-build clang-tidy python3-pip clang + apt install -y python3-pytest ninja-build clang-tidy python3-pip clang libgtest-dev pip3 install cmake==3.17.* - name: Configure (GCC) run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config' From 2aea5676eb70575942e48bd80161455a3e3c4b10 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Sat, 14 Oct 2023 11:09:24 +0200 Subject: [PATCH 096/127] GitHub Actions: C++11 is now the default --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 459607afc1..5a230ae748 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -21,7 +21,7 @@ jobs: env: MACOSX_DEPLOYMENT_TARGET: 10.15 run: | - CXXFLAGS=-std=c++11 cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64' + cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64' cmake --build build --config Release - name: Test ninja From 7ba27a393ae742b6660f075b9ada29ced19cacc0 Mon Sep 17 
00:00:00 2001 From: Shao-Ce SUN Date: Mon, 23 Oct 2023 10:23:56 +0800 Subject: [PATCH 097/127] Add a check for re2c version --- CMakeLists.txt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d0c62f9005..b95dacbc63 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,8 +57,11 @@ else() endif() # --- optional re2c +set(RE2C_PROPER FALSE) find_program(RE2C re2c) -if(RE2C) +execute_process(COMMAND "${RE2C}" --vernum OUTPUT_VARIABLE RE2C_RAW_VERSION) +math(EXPR RE2C_MAJOR_VERSION "${RE2C_RAW_VERSION} / 10000") +if(RE2C AND ${RE2C_MAJOR_VERSION} GREATER 1) # the depfile parser and ninja lexers are generated using re2c. function(re2c IN OUT) add_custom_command(DEPENDS ${IN} OUTPUT ${OUT} @@ -69,7 +72,7 @@ if(RE2C) re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc) add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc) else() - message(WARNING "re2c was not found; changes to src/*.in.cc will not affect your build.") + message(WARNING "re2c ${RE2C_MAJOR_VERSION} or later was not found; changes to src/*.in.cc will not affect your build.") add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc) endif() target_include_directories(libninja-re2c PRIVATE src) From 9db4889d7abd54f7276b12210705567ab62c3abe Mon Sep 17 00:00:00 2001 From: Shao-Ce SUN Date: Mon, 23 Oct 2023 10:30:27 +0800 Subject: [PATCH 098/127] fixup! 
--- CMakeLists.txt | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b95dacbc63..c0745af04b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,11 +57,13 @@ else() endif() # --- optional re2c -set(RE2C_PROPER FALSE) +set(RE2C_MAJOR_VERSION 0) find_program(RE2C re2c) -execute_process(COMMAND "${RE2C}" --vernum OUTPUT_VARIABLE RE2C_RAW_VERSION) -math(EXPR RE2C_MAJOR_VERSION "${RE2C_RAW_VERSION} / 10000") -if(RE2C AND ${RE2C_MAJOR_VERSION} GREATER 1) +if(RE2C) + execute_process(COMMAND "${RE2C}" --vernum OUTPUT_VARIABLE RE2C_RAW_VERSION) + math(EXPR RE2C_MAJOR_VERSION "${RE2C_RAW_VERSION} / 10000") +endif() +if(${RE2C_MAJOR_VERSION} GREATER 1) # the depfile parser and ninja lexers are generated using re2c. function(re2c IN OUT) add_custom_command(DEPENDS ${IN} OUTPUT ${OUT} @@ -72,7 +74,7 @@ if(RE2C AND ${RE2C_MAJOR_VERSION} GREATER 1) re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc) add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc) else() - message(WARNING "re2c ${RE2C_MAJOR_VERSION} or later was not found; changes to src/*.in.cc will not affect your build.") + message(WARNING "re2c 2 or later was not found; changes to src/*.in.cc will not affect your build.") add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc) endif() target_include_directories(libninja-re2c PRIVATE src) From a590e18b577cc3fd2890a96b6939504e3f32cbdc Mon Sep 17 00:00:00 2001 From: Tamino Bauknecht Date: Tue, 24 Oct 2023 18:22:02 +0200 Subject: [PATCH 099/127] Do not print 'no work to do' with --quiet --- src/ninja.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ninja.cc b/src/ninja.cc index 39672c3d1c..c011be1356 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -1366,7 +1366,9 @@ int NinjaMain::RunBuild(int argc, char** argv, Status* status) { disk_interface_.AllowStatCache(false); if 
(builder.AlreadyUpToDate()) { - status->Info("no work to do."); + if (config_.verbosity != BuildConfig::NO_STATUS_UPDATE) { + status->Info("no work to do."); + } return 0; } From 1111da82eecc27c87c622cae87e806cc3ca9da5c Mon Sep 17 00:00:00 2001 From: Tamino Bauknecht Date: Tue, 24 Oct 2023 18:34:40 +0200 Subject: [PATCH 100/127] Add output_test assertion for quiet without 'no work to do' --- misc/output_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/misc/output_test.py b/misc/output_test.py index 94d1fda9fe..a09448255e 100755 --- a/misc/output_test.py +++ b/misc/output_test.py @@ -133,6 +133,7 @@ def test_pr_1685(self): def test_status(self): self.assertEqual(run(''), 'ninja: no work to do.\n') self.assertEqual(run('', pipe=True), 'ninja: no work to do.\n') + self.assertEqual(run('', flags='--quiet'), '') def test_ninja_status_default(self): 'Do we show the default status by default?' From a744eea2b6c9b37024b12e749e61170e5c87d171 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Mon, 6 Nov 2023 14:53:29 +0100 Subject: [PATCH 101/127] Remove phony edges for nodes created by a dependency loader. This patch simplifies Ninja internals without modifying its behavior. It removes the creation (and removal) of phony edges as producers for nodes loaded by dependency loaders, i.e. coming from depfiles, dyndep files or the deps log. These edges were only used to ensure the build did not abort when these files are missing, unlike regular source inputs. This can be easily checked by adding a new flag to the Node class instead. This makes it easier to reason about how Ninja works internally. More specifically: - Move the generated_by_dep_loader_ flag from the Edge class to the Node class. The flag is true by default to minimize changes to the source code, since node instances can be first created by reading the deps or build logs before the manifest itself. - Modify Plan::AddSubTarget() to avoid aborting the build when a generated-by-deploader node is missing. 
Instead the function exits immediately, which corresponds to what happened before. - State::AddOut(), State::AddIn(), State::AddValidation(): Ensure that nodes added by these methods, which are only called from the manifest parser and unit-tests set the |generated_by_dep_loader_| flag to false, to indicate that these are regular input / output nodes. - ManifestParser::ParseEdge(): Add an assertion verifying that the dyndep file is marked as a regular input. - DyndepLoader::UpdateEdge(): Remove code path that looked for phony in-edges and ignored them. - DepLoader::CreatePhonyInEdge() is removed as no longer necessary. + Update a few places in unit-tests that were checking for the creation of the phony edges. Fuchsia-Topic: persistent-mode Change-Id: I98998238002351ef9c7a103040eb8a26d4183969 --- src/build.cc | 23 ++++++++++++++--------- src/build_test.cc | 41 ++++++++++++++++++++++++----------------- src/dyndep.cc | 13 ++++--------- src/graph.cc | 20 -------------------- src/graph.h | 20 +++++++++++++++----- src/manifest_parser.cc | 3 +++ src/state.cc | 3 +++ src/state.h | 3 +++ 8 files changed, 66 insertions(+), 60 deletions(-) diff --git a/src/build.cc b/src/build.cc index 76ff93af03..6903e45306 100644 --- a/src/build.cc +++ b/src/build.cc @@ -95,15 +95,20 @@ bool Plan::AddTarget(const Node* target, string* err) { bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err, set* dyndep_walk) { Edge* edge = node->in_edge(); - if (!edge) { // Leaf node. - if (node->dirty()) { - string referenced; - if (dependent) - referenced = ", needed by '" + dependent->path() + "',"; - *err = "'" + node->path() + "'" + referenced + " missing " - "and no known rule to make it"; - } - return false; + if (!edge) { + // Leaf node, this can be either a regular input from the manifest + // (e.g. a source file), or an implicit input from a depfile or dyndep + // file. In the first case, a dirty flag means the file is missing, + // and the build should stop. 
In the second, do not do anything here + // since there is no producing edge to add to the plan. + if (node->dirty() && !node->generated_by_dep_loader()) { + string referenced; + if (dependent) + referenced = ", needed by '" + dependent->path() + "',"; + *err = "'" + node->path() + "'" + referenced + + " missing and no known rule to make it"; + } + return false; } if (edge->outputs_ready()) diff --git a/src/build_test.cc b/src/build_test.cc index d32ad3e4c6..5ed8245e25 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -986,9 +986,19 @@ TEST_F(BuildTest, DepFileOK) { ASSERT_EQ(1u, fs_.files_read_.size()); EXPECT_EQ("foo.o.d", fs_.files_read_[0]); - // Expect three new edges: one generating foo.o, and two more from - // loading the depfile. - ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size()); + // Expect one new edge generating foo.o. Loading the depfile should have + // added nodes, but not phony edges to the graph. + ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size()); + + // Verify that nodes for blah.h and bar.h were added and that they + // are marked as generated by a dep loader. + ASSERT_FALSE(state_.LookupNode("foo.o")->generated_by_dep_loader()); + ASSERT_FALSE(state_.LookupNode("foo.c")->generated_by_dep_loader()); + ASSERT_TRUE(state_.LookupNode("blah.h")); + ASSERT_TRUE(state_.LookupNode("blah.h")->generated_by_dep_loader()); + ASSERT_TRUE(state_.LookupNode("bar.h")); + ASSERT_TRUE(state_.LookupNode("bar.h")->generated_by_dep_loader()); + // Expect our edge to now have three inputs: foo.c and two headers. ASSERT_EQ(3u, edge->inputs_.size()); @@ -1154,7 +1164,6 @@ TEST_F(BuildTest, DepFileCanonicalize) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build gen/stuff\\things/foo.o: cc x\\y/z\\foo.c\n")); - Edge* edge = state_.edges_.back(); fs_.Create("x/y/z/foo.c", ""); GetNode("bar.h")->MarkDirty(); // Mark bar.h as missing. 
@@ -1167,10 +1176,10 @@ TEST_F(BuildTest, DepFileCanonicalize) { // The depfile path does not get Canonicalize as it seems unnecessary. EXPECT_EQ("gen/stuff\\things/foo.o.d", fs_.files_read_[0]); - // Expect three new edges: one generating foo.o, and two more from - // loading the depfile. - ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size()); + // Expect one new edge generating foo.o. + ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. + Edge* edge = state_.edges_.back(); ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input, and @@ -2968,9 +2977,9 @@ TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) { EXPECT_TRUE(builder.AddTarget("fo o.o", &err)); ASSERT_EQ("", err); - // Expect three new edges: one generating fo o.o, and two more from - // loading the depfile. - ASSERT_EQ(3u, state.edges_.size()); + // Expect one new edge generating fo o.o, loading the depfile should + // not generate new edges. + ASSERT_EQ(1u, state.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. ASSERT_EQ(3u, edge->inputs_.size()); @@ -3110,16 +3119,14 @@ TEST_F(BuildWithDepsLogTest, DepFileDepsLogCanonicalize) { Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); - Edge* edge = state.edges_.back(); - state.GetNode("bar.h", 0)->MarkDirty(); // Mark bar.h as missing. EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err)); ASSERT_EQ("", err); - // Expect three new edges: one generating fo o.o, and two more from - // loading the depfile. - ASSERT_EQ(3u, state.edges_.size()); + // Expect one new edge generating fo o.o. + ASSERT_EQ(1u, state.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. + Edge* edge = state.edges_.back(); ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input. 
@@ -3675,8 +3682,8 @@ TEST_F(BuildTest, DyndepBuildDiscoverOutputAndDepfileInput) { EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); - // Loading the depfile gave tmp.imp a phony input edge. - ASSERT_TRUE(GetNode("tmp.imp")->in_edge()->is_phony()); + // Loading the depfile did not give tmp.imp a phony input edge. + ASSERT_FALSE(GetNode("tmp.imp")->in_edge()); EXPECT_TRUE(builder_.Build(&err)); EXPECT_EQ("", err); diff --git a/src/dyndep.cc b/src/dyndep.cc index dd4ed099a9..a0d699d5db 100644 --- a/src/dyndep.cc +++ b/src/dyndep.cc @@ -97,15 +97,10 @@ bool DyndepLoader::UpdateEdge(Edge* edge, Dyndeps const* dyndeps, for (std::vector::const_iterator i = dyndeps->implicit_outputs_.begin(); i != dyndeps->implicit_outputs_.end(); ++i) { - if (Edge* old_in_edge = (*i)->in_edge()) { - // This node already has an edge producing it. Fail with an error - // unless the edge was generated by ImplicitDepLoader, in which - // case we can replace it with the now-known real producer. - if (!old_in_edge->generated_by_dep_loader_) { - *err = "multiple rules generate " + (*i)->path(); - return false; - } - old_in_edge->outputs_.clear(); + if ((*i)->in_edge()) { + // This node already has an edge producing it. 
+ *err = "multiple rules generate " + (*i)->path(); + return false; } (*i)->set_in_edge(edge); } diff --git a/src/graph.cc b/src/graph.cc index 199294d481..62f13ec100 100644 --- a/src/graph.cc +++ b/src/graph.cc @@ -728,7 +728,6 @@ bool ImplicitDepLoader::ProcessDepfileDeps( Node* node = state_->GetNode(*i, slash_bits); *implicit_dep = node; node->AddOutEdge(edge); - CreatePhonyInEdge(node); } return true; @@ -756,7 +755,6 @@ bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, string* err) { Node* node = deps->nodes[i]; *implicit_dep = node; node->AddOutEdge(edge); - CreatePhonyInEdge(node); } return true; } @@ -768,21 +766,3 @@ vector::iterator ImplicitDepLoader::PreallocateSpace(Edge* edge, edge->implicit_deps_ += count; return edge->inputs_.end() - edge->order_only_deps_ - count; } - -void ImplicitDepLoader::CreatePhonyInEdge(Node* node) { - if (node->in_edge()) - return; - - Edge* phony_edge = state_->AddEdge(&State::kPhonyRule); - phony_edge->generated_by_dep_loader_ = true; - node->set_in_edge(phony_edge); - phony_edge->outputs_.push_back(node); - - // RecomputeDirty might not be called for phony_edge if a previous call - // to RecomputeDirty had caused the file to be stat'ed. Because previous - // invocations of RecomputeDirty would have seen this node without an - // input edge (and therefore ready), we have to set outputs_ready_ to true - // to avoid a potential stuck build. If we do call RecomputeDirty for - // this node, it will simply set outputs_ready_ to the correct value. - phony_edge->outputs_ready_ = true; -} diff --git a/src/graph.h b/src/graph.h index d07a9b7639..5c8ca2c610 100644 --- a/src/graph.h +++ b/src/graph.h @@ -104,6 +104,14 @@ struct Node { Edge* in_edge() const { return in_edge_; } void set_in_edge(Edge* edge) { in_edge_ = edge; } + /// Indicates whether this node was generated from a depfile or dyndep file, + /// instead of being a regular input or output from the Ninja manifest. 
+ bool generated_by_dep_loader() const { return generated_by_dep_loader_; } + + void set_generated_by_dep_loader(bool value) { + generated_by_dep_loader_ = value; + } + int id() const { return id_; } void set_id(int id) { id_ = id; } @@ -146,6 +154,13 @@ struct Node { /// has not yet been loaded. bool dyndep_pending_; + /// Set to true when this node comes from a depfile, a dyndep file or the + /// deps log. If it does not have a producing edge, the build should not + /// abort if it is missing (as for regular source inputs). By default + /// all nodes have this flag set to true, since the deps and build logs + /// can be loaded before the manifest. + bool generated_by_dep_loader_ = true; + /// The Edge that produces this Node, or NULL when there is no /// known edge to produce it. Edge* in_edge_; @@ -297,11 +312,6 @@ struct ImplicitDepLoader { /// an iterator pointing at the first new space. std::vector::iterator PreallocateSpace(Edge* edge, int count); - /// If we don't have a edge that generates this input already, - /// create one; this makes us not abort if the input is missing, - /// but instead will rebuild in that circumstance. 
- void CreatePhonyInEdge(Node* node); - State* state_; DiskInterface* disk_interface_; DepsLog* deps_log_; diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 8db6eb3009..103c365ebf 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -14,8 +14,10 @@ #include "manifest_parser.h" +#include #include #include + #include #include "graph.h" @@ -416,6 +418,7 @@ bool ManifestParser::ParseEdge(string* err) { if (dgi == edge->inputs_.end()) { return lexer_.Error("dyndep '" + dyndep + "' is not an input", err); } + assert(!edge->dyndep_->generated_by_dep_loader()); } return true; diff --git a/src/state.cc b/src/state.cc index 556b0d8802..0a68f2163a 100644 --- a/src/state.cc +++ b/src/state.cc @@ -128,6 +128,7 @@ Node* State::SpellcheckNode(const string& path) { void State::AddIn(Edge* edge, StringPiece path, uint64_t slash_bits) { Node* node = GetNode(path, slash_bits); + node->set_generated_by_dep_loader(false); edge->inputs_.push_back(node); node->AddOutEdge(edge); } @@ -138,6 +139,7 @@ bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits) { return false; edge->outputs_.push_back(node); node->set_in_edge(edge); + node->set_generated_by_dep_loader(false); return true; } @@ -145,6 +147,7 @@ void State::AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits) { Node* node = GetNode(path, slash_bits); edge->validations_.push_back(node); node->AddValidationOutEdge(edge); + node->set_generated_by_dep_loader(false); } bool State::AddDefault(StringPiece path, string* err) { diff --git a/src/state.h b/src/state.h index 878ac6d991..886b78f765 100644 --- a/src/state.h +++ b/src/state.h @@ -105,6 +105,9 @@ struct State { Node* LookupNode(StringPiece path) const; Node* SpellcheckNode(const std::string& path); + /// Add input / output / validation nodes to a given edge. This also + /// ensures that the generated_by_dep_loader() flag for all these nodes + /// is set to false, to indicate that they come from the input manifest. 
void AddIn(Edge* edge, StringPiece path, uint64_t slash_bits); bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits); void AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits); From 87c92f2cab7852f54bc697d53769ebb75e4a88be Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Tue, 7 Nov 2023 15:40:31 +0100 Subject: [PATCH 102/127] CMakeLists.txt: Use GTest::gtest instead of gtest This fixes the case where GTEST_ROOT is set to point to a local GoogleTest installation (see example below). Note that this needs a work-around for a subtle GTest 1.10.0: - When downloading, building then installing googletest-1.10.0, the installation directory contains CMake files that are picked later by Ninja's find_package() function properly, and which define the GTest::gtest target. This is the target name that should be used by projects that depend on GoogleTest, per the official documentation. - When instead 1.10.0, i.e. the same version, is downloaded and used locally with FetchContent_Declare() + FetchContent_MakeAvailable(), then only the `gtest` target will be defined. This was fixed in 1.11.0, where this use case properly defines GTest::gtest instead. The work-around checks for the definition of GTest::gtest after the FetchContent_MakeAvailable(googletest) call. If not defined, an alias to `gtest` is created instead. This ensures the code works with more recent GoogleTest releases as well. 
--- CMakeLists.txt | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 65a17068a7..91ff0135f2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -231,6 +231,23 @@ if(BUILD_TESTING) URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770 ) FetchContent_MakeAvailable(googletest) + + # Before googletest-1.11.0, the CMake files provided by the source archive + # did not define the GTest::gtest target, only the gtest one, so define + # an alias when needed to ensure the rest of this file works with all + # GoogleTest releases. + # + # Note that surprisingly, this is not needed when using GTEST_ROOT to + # point to a local installation, because this one contains CMake-generated + # files that contain the right target definition, and which will be + # picked up by the find_package(GTest) file above. + # + # This comment and the four lines below can be removed once Ninja only + # depends on release-1.11.0 or above. + if (NOT TARGET GTest::gtest) + message(STATUS "Defining GTest::gtest alias to work-around bug in older release.") + add_library(GTest::gtest ALIAS gtest) + endif() endif() # Tests all build into ninja_test executable. @@ -261,7 +278,7 @@ if(BUILD_TESTING) windows/ninja.manifest) endif() find_package(Threads REQUIRED) - target_link_libraries(ninja_test PRIVATE libninja libninja-re2c gtest Threads::Threads) + target_link_libraries(ninja_test PRIVATE libninja libninja-re2c GTest::gtest Threads::Threads) foreach(perftest build_log_perftest From 55bc4840064e75e54015eaf1b5fa659e607ba065 Mon Sep 17 00:00:00 2001 From: Orgad Shaneh Date: Sun, 19 Nov 2023 08:45:06 +0200 Subject: [PATCH 103/127] Fix typo Amends a744eea2b6c9b37024b12e749e61170e5c87d171. 
--- src/build_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build_test.cc b/src/build_test.cc index 5ed8245e25..8152b4e312 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -2978,7 +2978,7 @@ TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) { ASSERT_EQ("", err); // Expect one new edge generating fo o.o, loading the depfile should - // note generate new edges. + // not generate new edges. ASSERT_EQ(1u, state.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. ASSERT_EQ(3u, edge->inputs_.size()); From 8a2575e432b85baecb0054cc570db69f074c2633 Mon Sep 17 00:00:00 2001 From: Florian Schmaus Date: Mon, 5 Apr 2021 15:38:47 +0200 Subject: [PATCH 104/127] Consider the remaining load capacity in main loop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This changes CanRunMore() to return an int instead of a bool. The return value is the "remaining load capacity". That is the number of new jobs that can be spawned without saturating a potentially enabled load limitation (if ninja's -l option is used). We assume that every started edge increases the load by one. Hence the available "load capacity" is the maximum allowed load minus the current load. Previously, ninja would oversaturate the system with jobs, even though a load and job limit was provided, when multiple ninja builds are running. This is because changes in load average are inert, newly started jobs do not immediately change the load average, yet ninja assumed that new jobs are immediately reflected in the load average. Ninja would retrieve the current 1min load average, check if it is below the limit and, if so, start a new job, and then repeat. Since it takes a while for the new job to get reflected in the load average, ninja would often spawn jobs until the job limit ("-j") is reached.
If this is done by multiple parallel ninja builds, then the system becomes oversaturated, causing excessive context switches, which eventually slow down each and every build process. We can easily prevent this by considering the remaining load capacity in ninja's main loop. The following benchmark demonstrates how the change of this commit helps to speed up multiple parallel builds on the same host. We compare the total build times of 8 parallel builds of LLVM on a 256-core system using "ninja -j 258". ninja-master: 1351 seconds ninja-load-capacity: 920 seconds That is, with this commit, the whole process becomes 1.46× faster. The used benchmark script creates and prepares 8 build directories, records the start time, spawns 8 subshells invoking "ninja -j 258", awaits the termination of those subshells, and records the end time. Besides the total running time, it also outputs /proc/loadavg, which provides an indication of where the performance is gained: ninja-master: 3.90 93.94 146.38 1/1936 209125 ninja-load-capacity: 92.46 210.50 199.90 1/1936 36917 So with this change, ninja uses the available hardware cores better in the presence of competing ninja processes, while it does not overload the system. 
Finally, let us look at the two "dstat -cdgyl 60" traces of 8 parallel LLVM builds on a 256-core machine using "ninja -l 258": ninja-master --total-cpu-usage-- -dsk/total- ---paging-- ---system-- ---load-avg--- usr sys idl wai stl| read writ| in out | int csw | 1m 5m 15m 1 0 99 0 0| 12k 4759k| 5B 55B|1135 455 |17.9 70.3 38.1 38 6 56 0 0|2458B 7988k| 205B 0 | 34k 23k| 466 170 73.2 26 3 71 0 0| 102k 94M| 0 0 | 22k 6265 | 239 156 74.3 50 5 45 0 0|3149B 97M| 0 0 | 37k 12k| 257 191 92.2 58 6 36 0 0| 90k 71M| 0 0 | 43k 12k| 320 224 110 50 4 46 0 0| 52k 78M| 0 0 | 38k 6690 | 247 223 117 50 5 45 0 0| 202k 90M| 0 0 | 37k 9876 | 239 238 130 60 5 34 0 0| 109k 93M| 0 0 | 44k 8950 | 247 248 140 69 5 26 0 0|5939B 93M| 0 0 | 50k 11k| 309 268 154 49 4 47 0 0| 172k 111M| 0 0 | 36k 7835 | 283 267 161 58 7 35 0 0| 29k 142M| 0 0 | 45k 7666 | 261 267 168 72 4 24 0 0| 46k 281M| 0 0 | 50k 13k| 384 296 183 49 6 46 0 0| 68B 198M| 0 0 | 37k 6847 | 281 281 185 82 6 12 0 0| 0 97M| 0 0 | 59k 15k| 462 323 205 31 5 63 0 0| 0 301M| 0 0 | 26k 5350 | 251 291 202 66 7 28 0 0| 68B 254M| 0 0 | 49k 9091 | 270 292 208 68 8 25 0 0| 0 230M| 0 0 | 51k 8186 | 287 292 213 52 5 42 1 0| 0 407M| 0 0 | 42k 5619 | 207 271 211 29 7 64 0 0| 0 418M| 0 0 | 27k 2801 | 131 241 205 1 1 98 0 0| 137B 267M| 0 0 |1944 813 |55.8 199 193 0 0 100 0 0|2253B 43M| 0 0 | 582 365 |26.8 165 181 0 0 99 0 0| 0 68M| 0 0 | 706 414 |11.5 136 170 4 0 96 0 0| 0 13M| 0 0 |2892 378 |10.0 113 160 ninja-load-capacity --total-cpu-usage-- -dsk/total- ---paging-- ---system-- ---load-avg--- usr sys idl wai stl| read writ| in out | int csw | 1m 5m 15m 1 0 98 0 0| 12k 5079k| 5B 55B|1201 470 |1.35 40.2 115 43 6 51 0 0|3345B 78M| 0 0 | 34k 20k| 247 127 142 71 6 23 0 0| 0 59M| 0 0 | 53k 8485 | 286 159 152 60 5 35 0 0| 68B 118M| 0 0 | 45k 7125 | 277 178 158 62 4 35 0 0| 0 115M| 0 0 | 45k 6036 | 248 188 163 61 5 34 0 0| 0 96M| 0 0 | 44k 9448 | 284 212 173 66 5 28 0 0| 9B 94M| 0 0 | 49k 5733 | 266 219 178 64 7 29 0 0| 0 159M| 0 0 | 49k 6350 | 241 223 182 
 66 6 28 0 0| 0 240M| 0 0 | 50k 9325 | 285 241 191 68 4 27 0 0| 0 204M| 0 0 | 49k 5550 | 262 241 194 68 8 24 0 0| 0 161M| 0 0 | 53k 6368 | 255 244 198 79 7 14 0 0| 0 325M| 0 0 | 59k 5910 | 264 249 202 72 6 22 0 0| 0 367M| 0 0 | 54k 6684 | 253 249 205 71 6 22 1 0| 0 377M| 0 0 | 52k 8175 | 284 257 211 48 8 44 0 0| 0 417M| 0 0 | 40k 5878 | 223 247 210 23 4 73 0 0| 0 238M| 0 0 | 22k 1644 | 114 214 201 0 0 100 0 0| 0 264M| 0 0 |1016 813 |43.3 175 189 0 0 100 0 0| 0 95M| 0 0 | 670 480 |17.1 144 177 As one can see in the above dstat traces, ninja-master will have a high 1min load average, of up to 462. This is because ninja will not consider the remaining load capacity when spawning new jobs, but instead spawn new jobs until it runs into the -j limitation. This, in turn, causes an increase of context switches: the rows with a high 1min load average also have >10k context switches (csw). Whereas a remaining load-capacity aware ninja avoids oversaturating the system with excessive additional jobs. Note that since the load average is an exponentially damped moving sum, build systems that take the load average into consideration to limit the load average to the number of available processors will always (slightly) overprovision the system with tasks. Eventually, this change decreases the aggressiveness with which ninja schedules new jobs if the '-l' knob is used, and by that, the level of overprovisioning, to a reasonable level compared to the status quo. It should be mentioned that this means that an individual build using '-l' will now be potentially a bit slower. However, this can easily be fixed by increasing the value provided to the '-l' argument.
The benchmarks were performed using the following script: set -euo pipefail VANILLA_NINJA=~/code/ninja-master/build/ninja LOAD_CAPACITY_AWARE_NINJA=~/code/ninja-load-capacity/build/ninja CMAKE_NINJA_PROJECT_SOURCE=~/code/llvm-project/llvm declare -ir PARALLEL_BUILDS=8 readonly TMP_DIR=$(mktemp --directory --tmpdir=/var/tmp) cleanup() { rm -rf "${TMP_DIR}" } trap cleanup EXIT BUILD_DIRS=() echo "Preparing build directories" for i in $(seq 1 ${PARALLEL_BUILDS}); do BUILD_DIR="${TMP_DIR}/${i}" mkdir "${BUILD_DIR}" ( cd "${BUILD_DIR}" cmake -G Ninja "${CMAKE_NINJA_PROJECT_SOURCE}" \ &> "${BUILD_DIR}/build.log" )& BUILD_DIRS+=("${BUILD_DIR}") done wait NPROC=$(nproc) MAX_LOAD=$(echo "${NPROC} + 2" | bc ) SLEEP_SECONDS=300 NINJA_BINS=( "${VANILLA_NINJA}" "${LOAD_CAPACITY_AWARE_NINJA}" ) LAST_NINJA_BIN="${LOAD_CAPACITY_AWARE_NINJA}" for NINJA_BIN in "${NINJA_BINS[@]}"; do echo "Cleaning build dirs" for BUILD_DIR in "${BUILD_DIRS[@]}"; do ( "${NINJA_BIN}" -C "${BUILD_DIR}" clean &> "${BUILD_DIR}/build.log" )& done wait echo "Starting ${PARALLEL_BUILDS} parallel builds with ${NINJA_BIN} using -j ${MAX_LOAD}" START=$(date +%s) for BUILD_DIR in "${BUILD_DIRS[@]}"; do ( "${NINJA_BIN}" -C "${BUILD_DIR}" -l "${MAX_LOAD}" &> "${BUILD_DIR}/build.log" )& done wait STOP=$(date +%s) DELTA_SECONDS=$((STOP - START)) echo "Using ${NINJA_BIN} to perform ${PARALLEL_BUILDS} of ${CMAKE_NINJA_PROJECT_SOURCE}" echo "took ${DELTA_SECONDS} seconds on this ${NPROC} core system using -j ${MAX_LOAD}" echo "/proc/loadavg:" cat /proc/loadavg echo "ninja --version:" "${NINJA_BIN}" --version if [[ "${NINJA_BIN}" != "${LAST_NINJA_BIN}" ]]; then echo "Sleeping ${SLEEP_SECONDS} seconds to bring system into quiescent state" sleep ${SLEEP_SECONDS} fi done --- src/build.cc | 55 ++++++++++++++++++++++++++++++++++++----------- src/build.h | 2 +- src/build_test.cc | 11 +++++++--- 3 files changed, 51 insertions(+), 17 deletions(-) diff --git a/src/build.cc b/src/build.cc index 6903e45306..782bed553a 100644 --- 
a/src/build.cc +++ b/src/build.cc @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #if defined(__SVR4) && defined(__sun) @@ -46,7 +48,7 @@ struct DryRunCommandRunner : public CommandRunner { virtual ~DryRunCommandRunner() {} // Overridden from CommandRunner: - virtual bool CanRunMore() const; + virtual size_t CanRunMore() const; virtual bool StartCommand(Edge* edge); virtual bool WaitForCommand(Result* result); @@ -54,8 +56,8 @@ struct DryRunCommandRunner : public CommandRunner { queue finished_; }; -bool DryRunCommandRunner::CanRunMore() const { - return true; +size_t DryRunCommandRunner::CanRunMore() const { + return SIZE_MAX; } bool DryRunCommandRunner::StartCommand(Edge* edge) { @@ -455,7 +457,7 @@ void Plan::Dump() const { struct RealCommandRunner : public CommandRunner { explicit RealCommandRunner(const BuildConfig& config) : config_(config) {} virtual ~RealCommandRunner() {} - virtual bool CanRunMore() const; + virtual size_t CanRunMore() const; virtual bool StartCommand(Edge* edge); virtual bool WaitForCommand(Result* result); virtual vector GetActiveEdges(); @@ -478,12 +480,26 @@ void RealCommandRunner::Abort() { subprocs_.Clear(); } -bool RealCommandRunner::CanRunMore() const { +size_t RealCommandRunner::CanRunMore() const { size_t subproc_number = subprocs_.running_.size() + subprocs_.finished_.size(); - return (int)subproc_number < config_.parallelism - && ((subprocs_.running_.empty() || config_.max_load_average <= 0.0f) - || GetLoadAverage() < config_.max_load_average); + + int64_t capacity = config_.parallelism - subproc_number; + + if (config_.max_load_average > 0.0f) { + int load_capacity = config_.max_load_average - GetLoadAverage(); + if (load_capacity < capacity) + capacity = load_capacity; + } + + if (capacity < 0) + capacity = 0; + + if (capacity == 0 && subprocs_.running_.empty()) + // Ensure that we make progress. 
+ capacity = 1; + + return capacity; } bool RealCommandRunner::StartCommand(Edge* edge) { @@ -634,8 +650,13 @@ bool Builder::Build(string* err) { // Second, we attempt to wait for / reap the next finished command. while (plan_.more_to_do()) { // See if we can start any more commands. - if (failures_allowed && command_runner_->CanRunMore()) { - if (Edge* edge = plan_.FindWork()) { + if (failures_allowed) { + size_t capacity = command_runner_->CanRunMore(); + while (capacity > 0) { + Edge* edge = plan_.FindWork(); + if (!edge) + break; + if (edge->GetBindingBool("generator")) { scan_.build_log()->Close(); } @@ -654,11 +675,19 @@ bool Builder::Build(string* err) { } } else { ++pending_commands; - } - // We made some progress; go back to the main loop. - continue; + --capacity; + + // Re-evaluate capacity. + size_t current_capacity = command_runner_->CanRunMore(); + if (current_capacity < capacity) + capacity = current_capacity; + } } + + // We are finished with all work items and have no pending + // commands. Therefore, break out of the main loop. + if (pending_commands == 0 && !plan_.more_to_do()) break; } // See if we can reap any finished commands. diff --git a/src/build.h b/src/build.h index 8ec2355f7e..c4a49a39cf 100644 --- a/src/build.h +++ b/src/build.h @@ -135,7 +135,7 @@ struct Plan { /// RealCommandRunner is an implementation that actually runs commands. struct CommandRunner { virtual ~CommandRunner() {} - virtual bool CanRunMore() const = 0; + virtual size_t CanRunMore() const = 0; virtual bool StartCommand(Edge* edge) = 0; /// The result of waiting for a command. 
diff --git a/src/build_test.cc b/src/build_test.cc index 8152b4e312..a9bffbb81f 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -15,6 +15,8 @@ #include "build.h" #include +#include +#include #include "build_log.h" #include "deps_log.h" @@ -473,7 +475,7 @@ struct FakeCommandRunner : public CommandRunner { max_active_edges_(1), fs_(fs) {} // CommandRunner impl - virtual bool CanRunMore() const; + virtual size_t CanRunMore() const; virtual bool StartCommand(Edge* edge); virtual bool WaitForCommand(Result* result); virtual vector GetActiveEdges(); @@ -574,8 +576,11 @@ void BuildTest::RebuildTarget(const string& target, const char* manifest, builder.command_runner_.release(); } -bool FakeCommandRunner::CanRunMore() const { - return active_edges_.size() < max_active_edges_; +size_t FakeCommandRunner::CanRunMore() const { + if (active_edges_.size() < max_active_edges_) + return SIZE_MAX; + + return 0; } bool FakeCommandRunner::StartCommand(Edge* edge) { From 8f47d5aa33c6c303a71093be2eac02672dfb2966 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Wed, 29 Nov 2023 21:16:06 +0100 Subject: [PATCH 105/127] Remove `-w dupbuild` completely, always error on duplicate edges Step 5, fixes #931. --- src/manifest_parser.cc | 15 ++------- src/manifest_parser.h | 6 +--- src/manifest_parser_test.cc | 61 ++++++++++--------------------------- src/ninja.cc | 18 ++--------- 4 files changed, 21 insertions(+), 79 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index 103c365ebf..aa52989882 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -337,19 +337,8 @@ bool ManifestParser::ParseEdge(string* err) { uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); if (!state_->AddOut(edge, path, slash_bits)) { - if (options_.dupe_edge_action_ == kDupeEdgeActionError) { - lexer_.Error("multiple rules generate " + path, err); - return false; - } else { - if (!quiet_) { - Warning( - "multiple rules generate %s. 
builds involving this target will " - "not be correct; continuing anyway", - path.c_str()); - } - if (e - i <= static_cast(implicit_outs)) - --implicit_outs; - } + lexer_.Error("multiple rules generate " + path, err); + return false; } } diff --git a/src/manifest_parser.h b/src/manifest_parser.h index 954cf467d5..db6812dce4 100644 --- a/src/manifest_parser.h +++ b/src/manifest_parser.h @@ -31,11 +31,7 @@ enum PhonyCycleAction { }; struct ManifestParserOptions { - ManifestParserOptions() - : dupe_edge_action_(kDupeEdgeActionWarn), - phony_cycle_action_(kPhonyCycleActionWarn) {} - DupeEdgeAction dupe_edge_action_; - PhonyCycleAction phony_cycle_action_; + PhonyCycleAction phony_cycle_action_ = kPhonyCycleActionWarn; }; /// Parses .ninja files. diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 66b72e2d4a..8a7b135d3d 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -330,29 +330,6 @@ TEST_F(ParserTest, CanonicalizePathsBackslashes) { } #endif -TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) { - ASSERT_NO_FATAL_FAILURE(AssertParse( -"rule cat\n" -" command = cat $in > $out\n" -"build out1 out2: cat in1\n" -"build out1: cat in2\n" -"build final: cat out1\n" -)); - // AssertParse() checks that the generated build graph is self-consistent. - // That's all the checking that this test needs. -} - -TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) { - ASSERT_NO_FATAL_FAILURE(AssertParse( -"rule cat\n" -" command = cat $in > $out\n" -"build out: cat in\n" -"build out: cat in\n" -)); - // AssertParse() checks that the generated build graph is self-consistent. - // That's all the checking that this test needs. 
-} - TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) { const char kInput[] = "rule cat\n" @@ -360,9 +337,7 @@ TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) { "build out1 out2: cat in1\n" "build out1: cat in2\n" "build final: cat out1\n"; - ManifestParserOptions parser_opts; - parser_opts.dupe_edge_action_ = kDupeEdgeActionError; - ManifestParser parser(&state, &fs_, parser_opts); + ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:5: multiple rules generate out1\n", err); @@ -377,9 +352,7 @@ TEST_F(ParserTest, DuplicateEdgeInIncludedFile) { "build final: cat out1\n"); const char kInput[] = "subninja sub.ninja\n"; - ManifestParserOptions parser_opts; - parser_opts.dupe_edge_action_ = kDupeEdgeActionError; - ManifestParser parser(&state, &fs_, parser_opts); + ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("sub.ninja:5: multiple rules generate out1\n", err); @@ -997,28 +970,26 @@ TEST_F(ParserTest, ImplicitOutputEmpty) { EXPECT_FALSE(edge->is_implicit_out(0)); } -TEST_F(ParserTest, ImplicitOutputDupe) { - ASSERT_NO_FATAL_FAILURE(AssertParse( +TEST_F(ParserTest, ImplicitOutputDupeError) { + const char kInput[] = "rule cat\n" " command = cat $in > $out\n" -"build foo baz | foo baq foo: cat bar\n")); - - Edge* edge = state.LookupNode("foo")->in_edge(); - ASSERT_EQ(edge->outputs_.size(), 3); - EXPECT_FALSE(edge->is_implicit_out(0)); - EXPECT_FALSE(edge->is_implicit_out(1)); - EXPECT_TRUE(edge->is_implicit_out(2)); +"build foo baz | foo baq foo: cat bar\n"; + ManifestParser parser(&state, &fs_); + string err; + EXPECT_FALSE(parser.ParseTest(kInput, &err)); + EXPECT_EQ("input:4: multiple rules generate foo\n", err); } -TEST_F(ParserTest, ImplicitOutputDupes) { - ASSERT_NO_FATAL_FAILURE(AssertParse( +TEST_F(ParserTest, ImplicitOutputDupesError) { + const char kInput[] = "rule cat\n" " command = cat $in > $out\n" -"build foo foo foo | 
foo foo foo foo: cat bar\n")); - - Edge* edge = state.LookupNode("foo")->in_edge(); - ASSERT_EQ(edge->outputs_.size(), 1); - EXPECT_FALSE(edge->is_implicit_out(0)); +"build foo foo foo | foo foo foo foo: cat bar\n"; + ManifestParser parser(&state, &fs_); + string err; + EXPECT_FALSE(parser.ParseTest(kInput, &err)); + EXPECT_EQ("input:4: multiple rules generate foo\n", err); } TEST_F(ParserTest, NoExplicitOutput) { diff --git a/src/ninja.cc b/src/ninja.cc index c011be1356..839e9e6fce 100644 --- a/src/ninja.cc +++ b/src/ninja.cc @@ -77,9 +77,6 @@ struct Options { /// Tool to run rather than building. const Tool* tool; - /// Whether duplicate rules for one target should warn or print an error. - bool dupe_edges_should_err; - /// Whether phony cycles should warn or print an error. bool phony_cycle_should_err; }; @@ -1210,12 +1207,6 @@ bool WarningEnable(const string& name, Options* options) { " phonycycle={err,warn} phony build statement references itself\n" ); return false; - } else if (name == "dupbuild=err") { - options->dupe_edges_should_err = true; - return true; - } else if (name == "dupbuild=warn") { - options->dupe_edges_should_err = false; - return true; } else if (name == "phonycycle=err") { options->phony_cycle_should_err = true; return true; @@ -1227,9 +1218,8 @@ bool WarningEnable(const string& name, Options* options) { Warning("deprecated warning 'depfilemulti'"); return true; } else { - const char* suggestion = - SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn", - "phonycycle=err", "phonycycle=warn", NULL); + const char* suggestion = SpellcheckString(name.c_str(), "phonycycle=err", + "phonycycle=warn", nullptr); if (suggestion) { Error("unknown warning flag '%s', did you mean '%s'?", name.c_str(), suggestion); @@ -1525,7 +1515,6 @@ NORETURN void real_main(int argc, char** argv) { BuildConfig config; Options options = {}; options.input_file = "build.ninja"; - options.dupe_edges_should_err = true; setvbuf(stdout, NULL, _IOLBF, BUFSIZ); const 
char* ninja_command = argv[0]; @@ -1562,9 +1551,6 @@ NORETURN void real_main(int argc, char** argv) { NinjaMain ninja(ninja_command, config); ManifestParserOptions parser_opts; - if (options.dupe_edges_should_err) { - parser_opts.dupe_edge_action_ = kDupeEdgeActionError; - } if (options.phony_cycle_should_err) { parser_opts.phony_cycle_action_ = kPhonyCycleActionError; } From 4d98903d4c986f720ddb3f18d32c1125ef3e680e Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Wed, 6 Dec 2023 20:49:28 +0100 Subject: [PATCH 106/127] Improve misleading error message when an output is defined multiple times --- src/manifest_parser.cc | 4 ++-- src/manifest_parser_test.cc | 4 ++-- src/missing_deps_test.cc | 6 +++--- src/state.cc | 11 +++++++++-- src/state.h | 2 +- src/state_test.cc | 2 +- 6 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc index aa52989882..c4b2980164 100644 --- a/src/manifest_parser.cc +++ b/src/manifest_parser.cc @@ -336,8 +336,8 @@ bool ManifestParser::ParseEdge(string* err) { return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); - if (!state_->AddOut(edge, path, slash_bits)) { - lexer_.Error("multiple rules generate " + path, err); + if (!state_->AddOut(edge, path, slash_bits, err)) { + lexer_.Error(std::string(*err), err); return false; } } diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc index 8a7b135d3d..c5a1fe8fd2 100644 --- a/src/manifest_parser_test.cc +++ b/src/manifest_parser_test.cc @@ -978,7 +978,7 @@ TEST_F(ParserTest, ImplicitOutputDupeError) { ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); - EXPECT_EQ("input:4: multiple rules generate foo\n", err); + EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err); } TEST_F(ParserTest, ImplicitOutputDupesError) { @@ -989,7 +989,7 @@ TEST_F(ParserTest, ImplicitOutputDupesError) { ManifestParser parser(&state, &fs_); 
string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); - EXPECT_EQ("input:4: multiple rules generate foo\n", err); + EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err); } TEST_F(ParserTest, NoExplicitOutput) { diff --git a/src/missing_deps_test.cc b/src/missing_deps_test.cc index 12ae8ed42e..dae377b49d 100644 --- a/src/missing_deps_test.cc +++ b/src/missing_deps_test.cc @@ -64,9 +64,9 @@ struct MissingDependencyScannerTest : public testing::Test { compile_rule_.AddBinding("deps", deps_type); generator_rule_.AddBinding("deps", deps_type); Edge* header_edge = state_.AddEdge(&generator_rule_); - state_.AddOut(header_edge, "generated_header", 0); + state_.AddOut(header_edge, "generated_header", 0, nullptr); Edge* compile_edge = state_.AddEdge(&compile_rule_); - state_.AddOut(compile_edge, "compiled_object", 0); + state_.AddOut(compile_edge, "compiled_object", 0, nullptr); } void CreateGraphDependencyBetween(const char* from, const char* to) { @@ -130,7 +130,7 @@ TEST_F(MissingDependencyScannerTest, MissingDepFixedIndirect) { CreateInitialState(); // Adding an indirect dependency also fixes the issue Edge* intermediate_edge = state_.AddEdge(&generator_rule_); - state_.AddOut(intermediate_edge, "intermediate", 0); + state_.AddOut(intermediate_edge, "intermediate", 0, nullptr); CreateGraphDependencyBetween("compiled_object", "intermediate"); CreateGraphDependencyBetween("intermediate", "generated_header"); RecordDepsLogDep("compiled_object", "generated_header"); diff --git a/src/state.cc b/src/state.cc index 0a68f2163a..d4b9a71a50 100644 --- a/src/state.cc +++ b/src/state.cc @@ -133,10 +133,17 @@ void State::AddIn(Edge* edge, StringPiece path, uint64_t slash_bits) { node->AddOutEdge(edge); } -bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits) { +bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits, + std::string* err) { Node* node = GetNode(path, slash_bits); - if (node->in_edge()) + if (Edge* other = 
node->in_edge()) { + if (other == edge) { + *err = path.AsString() + " is defined as an output multiple times"; + } else { + *err = "multiple rules generate " + path.AsString(); + } return false; + } edge->outputs_.push_back(node); node->set_in_edge(edge); node->set_generated_by_dep_loader(false); diff --git a/src/state.h b/src/state.h index 886b78f765..29bed56193 100644 --- a/src/state.h +++ b/src/state.h @@ -109,7 +109,7 @@ struct State { /// ensures that the generated_by_dep_loader() flag for all these nodes /// is set to false, to indicate that they come from the input manifest. void AddIn(Edge* edge, StringPiece path, uint64_t slash_bits); - bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits); + bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits, std::string* err); void AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits); bool AddDefault(StringPiece path, std::string* error); diff --git a/src/state_test.cc b/src/state_test.cc index 96469f933a..e0e3060c19 100644 --- a/src/state_test.cc +++ b/src/state_test.cc @@ -36,7 +36,7 @@ TEST(State, Basic) { Edge* edge = state.AddEdge(rule); state.AddIn(edge, "in1", 0); state.AddIn(edge, "in2", 0); - state.AddOut(edge, "out", 0); + state.AddOut(edge, "out", 0, nullptr); EXPECT_EQ("cat in1 in2 > out", edge->EvaluateCommand()); From 4d38849501ca09c2dcd9e778b59f9a4ec32180dd Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Mon, 4 Dec 2023 22:47:37 +0100 Subject: [PATCH 107/127] CanonicalizePath: Remove kMaxComponents limit This patch refactors the CanonicalizePath() to fix two issues and improve performance. This is achieved through the following: - Remove the kMaxPathComponents limit entirely, which fixes ninja-build#1732, by dropping the `components` array entirely, in favor of back-tracking the destination pointer. - Properly handle '/' and '\' which were incorrectly converted into an empty string. This fixes ninja-build#2008. 
- Skip initial '../' components in relative paths, as these are common when referencing source files in build plans, and doing so makes the loop after this step run faster in practice since most source files do not need adjustments. - Simplify the inner loop logic by handling the last component (which is not followed by a trailing directory separator) separately. This noticeably improves performance because the inner loop becomes smaller with fewer branch mis-predictions in the general case. - Never access or copy the character after the end of the input string. - Use memchr() to find the next '/' on Posix, which allows the use of SIMD implementations provided by the C runtime (e.g. through IFUNC functions on Linux), resulting in very noticeable speedup. This is also why a statically-linked Ninja executable will be slower than one that links to the C library dynamically :-/ - Avoid performing any writes when the input path doesn't need any adjustment, which is also quite common. Note that this patch does _not_ remove the 64-bit limit for the `slash_bits` value, which is only used on Win32. Benchmarking was done in several ways: - On Linux, running `hyperfine canon-perftest` to run the canonicalization benchmark program and measure its total running time. Three compilers were used to generate dynamically-linked executables. ``` BEFORE (ms) AFTER (ms) GCC 13.2.0 651 369 Clang 14.0 591 402 Clang 18.0 653 400 ``` - On Windows, running `canon-perftest` 5 times and keeping the best reported average result. The numbers are slower since they only measure the benched function. ``` BEFORE (ms) AFTER (ms) Mingw64 GCC 12 246 195 ``` - On Linux, run `hyperfine ninja -C out/default -n --quiet` on a large Fuchsia build plan, once with 70000+ pending commands, and once after the build (i.e. `ninja: no work to do`). 
```` BEFORE (s) AFTER (s) pre_build 8.789 8.647 post_build 6.703 6.590 ``` --- src/util.cc | 149 +++++++++++++++++++++++++++++++++++------------ src/util_test.cc | 101 ++++++++++++++++++++++++++++++-- 2 files changed, 208 insertions(+), 42 deletions(-) diff --git a/src/util.cc b/src/util.cc index 0553de346c..5f67fcf403 100644 --- a/src/util.cc +++ b/src/util.cc @@ -143,20 +143,19 @@ void CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits) { return; } - const int kMaxPathComponents = 60; - char* components[kMaxPathComponents]; - int component_count = 0; - char* start = path; char* dst = start; + char* dst_start = dst; const char* src = start; const char* end = start + *len; + const char* src_next; + // For absolute paths, skip the leading directory separator + // as this one should never be removed from the result. if (IsPathSeparator(*src)) { #ifdef _WIN32 - - // network path starts with // - if (*len > 1 && IsPathSeparator(*(src + 1))) { + // Windows network path starts with // + if (src + 2 <= end && IsPathSeparator(src[1])) { src += 2; dst += 2; } else { @@ -167,50 +166,126 @@ void CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits) { ++src; ++dst; #endif + dst_start = dst; + } else { + // For relative paths, skip any leading ../ as these are quite common + // to reference source files in build plans, and doing this here makes + // the loop work below faster in general. + while (src + 3 <= end && src[0] == '.' && src[1] == '.' && + IsPathSeparator(src[2])) { + src += 3; + dst += 3; + } } - while (src < end) { - if (*src == '.') { - if (src + 1 == end || IsPathSeparator(src[1])) { - // '.' component; eliminate. - src += 2; - continue; - } else if (src[1] == '.' && (src + 2 == end || IsPathSeparator(src[2]))) { - // '..' component. Back up if possible. + // Loop over all components of the paths _except_ the last one, in + // order to simplify the loop's code and make it faster. 
+ int component_count = 0; + char* dst0 = dst; + for (; src < end; src = src_next) { +#ifndef _WIN32 + // Use memchr() for faster lookups thanks to optimized C library + // implementation. `hyperfine canon_perftest` shows a significant + // difference (e,g, 484ms vs 437ms). + const char* next_sep = + static_cast(::memchr(src, '/', end - src)); + if (!next_sep) { + // This is the last component, will be handled out of the loop. + break; + } +#else + // Need to check for both '/' and '\\' so do not use memchr(). + // Cannot use strpbrk() because end[0] can be \0 or something else! + const char* next_sep = src; + while (next_sep != end && !IsPathSeparator(*next_sep)) + ++next_sep; + if (next_sep == end) { + // This is the last component, will be handled out of the loop. + break; + } +#endif + // Position for next loop iteration. + src_next = next_sep + 1; + // Length of the component, excluding trailing directory. + size_t component_len = next_sep - src; + + if (component_len <= 2) { + if (component_len == 0) { + continue; // Ignore empty component, e.g. 'foo//bar' -> 'foo/bar'. + } + if (src[0] == '.') { + if (component_len == 1) { + continue; // Ignore '.' component, e.g. './foo' -> 'foo'. + } else if (src[1] == '.') { + // Process the '..' component if found. Back up if possible. + if (component_count > 0) { + // Move back to start of previous component. + --component_count; + while (--dst > dst0 && !IsPathSeparator(dst[-1])) { + // nothing to do here, decrement happens before condition check. + } + } else { + dst[0] = '.'; + dst[1] = '.'; + dst[2] = src[2]; + dst += 3; + } + continue; + } + } + } + ++component_count; + + // Copy or skip component, including trailing directory separator. + if (dst != src) { + ::memmove(dst, src, src_next - src); + } + dst += src_next - src; + } + + // Handling the last component that does not have a trailing separator. + // The logic here is _slightly_ different since there is no trailing + // directory separator. 
+ size_t component_len = end - src; + do { + if (component_len == 0) + break; // Ignore empty component (e.g. 'foo//' -> 'foo/') + if (src[0] == '.') { + if (component_len == 1) + break; // Ignore trailing '.' (e.g. 'foo/.' -> 'foo/') + if (src[1] == '.') { + // Handle '..'. Back up if possible. if (component_count > 0) { - dst = components[component_count - 1]; - src += 3; - --component_count; + while (--dst > dst0 && !IsPathSeparator(dst[-1])) { + // nothing to do here, decrement happens before condition check. + } } else { - *dst++ = *src++; - *dst++ = *src++; - *dst++ = *src++; + dst[0] = '.'; + dst[1] = '.'; + dst += 2; + // No separator to add here. } - continue; + break; } } - - if (IsPathSeparator(*src)) { - src++; - continue; + // Skip or copy last component, no trailing separator. + if (dst != src) { + ::memmove(dst, src, component_len); } + dst += component_len; + } while (0); - if (component_count == kMaxPathComponents) - Fatal("path has too many components : %s", path); - components[component_count] = dst; - ++component_count; - - while (src != end && !IsPathSeparator(*src)) - *dst++ = *src++; - *dst++ = *src++; // Copy '/' or final \0 character as well. - } + // Remove trailing path separator if any, but keep the initial + // path separator(s) if there was one (or two on Windows). + if (dst > dst_start && IsPathSeparator(dst[-1])) + dst--; if (dst == start) { + // Handle special cases like "aa/.." -> "." *dst++ = '.'; - *dst++ = '\0'; } - *len = dst - start - 1; + *len = dst - start; // dst points after the trailing char here. 
#ifdef _WIN32 uint64_t bits = 0; uint64_t bits_mask = 1; diff --git a/src/util_test.cc b/src/util_test.cc index d58b17088c..8467e2a72b 100644 --- a/src/util_test.cc +++ b/src/util_test.cc @@ -89,13 +89,57 @@ TEST(CanonicalizePath, PathSamples) { EXPECT_EQ("/foo", path); #endif + path = ".."; + CanonicalizePath(&path); + EXPECT_EQ("..", path); + + path = "../"; + CanonicalizePath(&path); + EXPECT_EQ("..", path); + + path = "../foo"; + CanonicalizePath(&path); + EXPECT_EQ("../foo", path); + + path = "../foo/"; + CanonicalizePath(&path); + EXPECT_EQ("../foo", path); + + path = "../.."; + CanonicalizePath(&path); + EXPECT_EQ("../..", path); + + path = "../../"; + CanonicalizePath(&path); + EXPECT_EQ("../..", path); + + path = "./../"; + CanonicalizePath(&path); + EXPECT_EQ("..", path); + + path = "/.."; + CanonicalizePath(&path); + EXPECT_EQ("/..", path); + + path = "/../"; + CanonicalizePath(&path); + EXPECT_EQ("/..", path); + + path = "/../.."; + CanonicalizePath(&path); + EXPECT_EQ("/../..", path); + + path = "/../../"; + CanonicalizePath(&path); + EXPECT_EQ("/../..", path); + path = "/"; CanonicalizePath(&path); - EXPECT_EQ("", path); + EXPECT_EQ("/", path); path = "/foo/.."; CanonicalizePath(&path); - EXPECT_EQ("", path); + EXPECT_EQ("/", path); path = "."; CanonicalizePath(&path); @@ -171,7 +215,7 @@ TEST(CanonicalizePath, PathSamplesWindows) { path = "\\"; CanonicalizePath(&path); - EXPECT_EQ("", path); + EXPECT_EQ("/", path); } TEST(CanonicalizePath, SlashTracking) { @@ -321,8 +365,53 @@ TEST(CanonicalizePath, TooManyComponents) { EXPECT_EQ(58, std::count(path.begin(), path.end(), '\\')); CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0x3ffffffffffffff); + + // More than 60 components is now completely ok too. 
+ path = + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" + "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h"; + EXPECT_EQ(218, std::count(path.begin(), path.end(), '\\')); + CanonicalizePath(&path, &slash_bits); + EXPECT_EQ(slash_bits, 0xffffffffffffffff); } -#endif +#else // !_WIN32 +TEST(CanonicalizePath, TooManyComponents) { + string path; + uint64_t slash_bits; + + // More than 60 components is now completely ok. + path = + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" + "a/a/a/a/a/a/a/a/a/x/y.h"; + EXPECT_EQ(218, std::count(path.begin(), path.end(), '/')); + CanonicalizePath(&path, &slash_bits); + EXPECT_EQ(slash_bits, 0x0); +} +#endif // !_WIN32 TEST(CanonicalizePath, UpDir) { string path, err; @@ -353,11 +442,13 @@ TEST(CanonicalizePath, NotNullTerminated) { EXPECT_EQ(strlen("foo"), len); EXPECT_EQ("foo/. 
bar/.", string(path)); + // Verify that foo/..file gets canonicalized to 'file' without + // touching the rest of the string. path = "foo/../file bar/."; len = strlen("foo/../file"); CanonicalizePath(&path[0], &len, &unused); EXPECT_EQ(strlen("file"), len); - EXPECT_EQ("file ./file bar/.", string(path)); + EXPECT_EQ("file../file bar/.", string(path)); } TEST(PathEscaping, TortureTest) { From 8eedf9330a8f188290456cc3fd2c6f94fbd280ed Mon Sep 17 00:00:00 2001 From: yourfather <597494370@qq.com> Date: Fri, 8 Dec 2023 10:45:18 +0800 Subject: [PATCH 108/127] [FIX] compile: gcc version > 11.3, treat -Wmaybe-uninitialized as error --- CMakeLists.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index c9529d1562..8a5ef00c15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -252,6 +252,16 @@ if(BUILD_TESTING) if (NOT TARGET GTest::gtest) message(STATUS "Defining GTest::gtest alias to work-around bug in older release.") add_library(GTest::gtest ALIAS gtest) + + # NOTE: gtest uninit some variables, gcc >= 1.11.3 may cause error on compile. + # Remove this comment and six lines below, once ninja deps gtest-1.11.0 or above. 
+ if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "1.11.3") + check_cxx_compiler_flag(-Wno-maybe-uninitialized flag_no_maybe_uninit) + if (flag_no_maybe_uninit) + target_compile_options(gtest PRIVATE -Wno-maybe-uninitialized) + endif() + endif() + endif() endif() From e40ed8e8fc3cacb89390db28f0acf712461b4bc4 Mon Sep 17 00:00:00 2001 From: Cole Faust Date: Sun, 31 Dec 2023 14:35:24 -0800 Subject: [PATCH 109/127] Add a dark mode to the docs --- doc/style.css | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/doc/style.css b/doc/style.css index 9976c03ac3..363e272b24 100644 --- a/doc/style.css +++ b/doc/style.css @@ -1,15 +1,22 @@ +:root { + color-scheme: light dark; +} + body { margin: 5ex 10ex; max-width: 80ex; line-height: 1.5; font-family: sans-serif; } + h1, h2, h3 { font-weight: normal; } + pre, code { font-family: x, monospace; } + pre { padding: 1ex; background: #eee; @@ -17,13 +24,32 @@ pre { min-width: 0; font-size: 90%; } +@media (prefers-color-scheme: dark) { + pre { + background: #333; + border: solid 1px #444; + } +} + code { color: #007; } +@media (prefers-color-scheme: dark) { + code { + color: #a7cec8; + } +} + div.chapter { margin-top: 4em; border-top: solid 2px black; } +@media (prefers-color-scheme: dark) { + div.chapter { + border-top: solid 2px white; + } +} + p { margin-top: 0; } From 8b42a300bd57b93a8037e5097041c639ade8f9db Mon Sep 17 00:00:00 2001 From: scivision Date: Tue, 16 Jan 2024 12:56:15 -0500 Subject: [PATCH 110/127] correction to #2360 in general, flags check have to be to the non-no option. Updates use of deprecated CMake command. --- CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8a5ef00c15..90e34188c4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -255,9 +255,9 @@ if(BUILD_TESTING) # NOTE: gtest uninit some variables, gcc >= 1.11.3 may cause error on compile. 
# Remove this comment and six lines below, once ninja deps gtest-1.11.0 or above. - if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "1.11.3") - check_cxx_compiler_flag(-Wno-maybe-uninitialized flag_no_maybe_uninit) - if (flag_no_maybe_uninit) + if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "1.11.3") + check_cxx_compiler_flag(-Wmaybe-uninitialized flag_maybe_uninit) + if (flag_maybe_uninit) target_compile_options(gtest PRIVATE -Wno-maybe-uninitialized) endif() endif() From ccf2339f4ab7b90b2d5bf1b6668250876aebea5c Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Thu, 11 Jan 2024 20:00:09 +0100 Subject: [PATCH 111/127] README.md: document Manual and Doxygen generation Fixes #2362 --- README.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/README.md b/README.md index 1ca56c5183..732ef28b2f 100644 --- a/README.md +++ b/README.md @@ -49,3 +49,39 @@ To run the unit tests: ``` ./build-cmake/ninja_test ``` + +## Generating documentation + +### Ninja Manual + +You must have `asciidoc` and `xsltproc` in your PATH, then do: + +``` +./configure.py +ninja manual doc/manual.pdf +``` + +Which will generate `doc/manual.html`. + +To generate the PDF version of the manual, you must have `dblatext` in your PATH then do: + +``` +./configure.py # only if you didn't do it previously. +ninja doc/manual.pdf +``` + +Which will generate `doc/manual.pdf`. + +### Doxygen documentation + +If you have `doxygen` installed, you can build documentation extracted from C++ +declarations and comments to help you navigate the code. Note that Ninja is a standalone +executable, not a library, so there is no public API, all details exposed here are +internal. + +``` +./configure.py # if needed +ninja doxygen +``` + +Then open `doc/doxygen/html/index.html` in a browser to look at it. 
From 74d795cb9c8dee0172bfe585e3725a4adf1847dc Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 13 Feb 2024 14:59:29 +0100 Subject: [PATCH 112/127] GitHub Actions: Don't specify patch version for actions/upload-release-asset --- .github/workflows/linux.yml | 4 ++-- .github/workflows/macos.yml | 2 +- .github/workflows/windows.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4a17f0e084..536754d94b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -65,7 +65,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0.1 + uses: actions/upload-release-asset@v1.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -198,7 +198,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0.1 + uses: actions/upload-release-asset@v1.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 5a230ae748..dee9ab00b1 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -43,7 +43,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0.1 + uses: actions/upload-release-asset@v1.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 08bb3478b9..7c1c961374 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -57,7 +57,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0.1 + uses: actions/upload-release-asset@v1.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: From a1ad0fb190d05c1547f3cd5237972259f1df41b1 Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Tue, 13 Feb 2024 15:00:46 +0100 Subject: 
[PATCH 113/127] GitHub Actions: Only specify major version for actions/upload-release-asset --- .github/workflows/linux.yml | 4 ++-- .github/workflows/macos.yml | 2 +- .github/workflows/windows.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 536754d94b..b08e831b62 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -65,7 +65,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0 + uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -198,7 +198,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0 + uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index dee9ab00b1..d3dd9eef0f 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -43,7 +43,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0 + uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7c1c961374..e169eb4d38 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -57,7 +57,7 @@ jobs: - name: Upload release asset if: github.event.action == 'published' - uses: actions/upload-release-asset@v1.0 + uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: From 9a4f715d2b353492b118d0833bad6e506c4398cd Mon Sep 17 00:00:00 2001 From: Mihail Szabolcs Date: Sun, 18 Feb 2024 14:09:26 +0200 Subject: [PATCH 114/127] added missing include guards These changes make it easy to build an amalgamated `ninja.cc` that can be used to bootstrap ninja with just a working 
C++ compiler, without the need for any third-party tools like `cmake` or `python`. *nix c++ -O2 src/ninja_amalgamated.cc -o ninja osx-cross x86_64-apple-darwin19-c++ -O2 src/one.cc -o ninja mingw x86_64-w64-mingw32-c++ -O2 src/ninja_amalgamated.cc -o ninja.exe msvc cl.exe /nologo /Ox /GR- src\ninja_amalgamated.cc /out:ninja.exe --- src/includes_normalize.h | 5 +++++ src/msvc_helper.h | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/src/includes_normalize.h b/src/includes_normalize.h index 7d505564ad..8d29a64a93 100644 --- a/src/includes_normalize.h +++ b/src/includes_normalize.h @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +#ifndef INCLUDES_NORMALIZE_H_ +#define INCLUDES_NORMALIZE_H_ + #include #include @@ -38,3 +41,5 @@ struct IncludesNormalize { std::string relative_to_; std::vector split_relative_to_; }; + +#endif // INCLUDES_NORMALIZE_H_ diff --git a/src/msvc_helper.h b/src/msvc_helper.h index 568b9f94e5..699b0a132d 100644 --- a/src/msvc_helper.h +++ b/src/msvc_helper.h @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#ifndef MSVC_HELPER_H_ +#define MSVC_HELPER_H_ + #include std::string EscapeForDepfile(const std::string& path); @@ -30,3 +33,5 @@ struct CLWrapper { void* env_block_; }; + +#endif // MSVC_HELPER_H_ From ab510c7a8cccbea0ea2c82531dc23893b551d55e Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Thu, 29 Feb 2024 19:03:15 +0100 Subject: [PATCH 115/127] Fix comparision error between pointer and NULL --- src/build_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/build_test.cc b/src/build_test.cc index c9cc6478eb..429e71cf16 100644 --- a/src/build_test.cc +++ b/src/build_test.cc @@ -504,7 +504,7 @@ TEST_F(PlanTest, PriorityWithoutBuildLog) { "a1", "a0", "b0", "c0", "out"}; for (int i = 0; i < n_edges; ++i) { Edge* edge = plan_.FindWork(); - ASSERT_NE(edge, NULL); + ASSERT_TRUE(edge != nullptr); EXPECT_EQ(expected_order[i], edge->outputs_[0]->path()); std::string err; From df4c6a461ef29b1a7e7be9d50453ef7a3b38e6f3 Mon Sep 17 00:00:00 2001 From: von Heydebrand Julian Date: Thu, 14 Mar 2024 14:28:55 +0100 Subject: [PATCH 116/127] Fix crash when FormatMessageA sets lpBuffer to nullptr The std::string constructor would try to perform a strlen() call on the invalid pointer --- src/util.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/util.cc b/src/util.cc index 5f67fcf403..2a43775972 100644 --- a/src/util.cc +++ b/src/util.cc @@ -540,6 +540,13 @@ string GetLastErrorString() { (char*)&msg_buf, 0, NULL); + + if (msg_buf == nullptr) { + char fallback_msg[128] = {0}; + snprintf(fallback_msg, sizeof(fallback_msg), "GetLastError() = %d", err); + return fallback_msg; + } + string msg = msg_buf; LocalFree(msg_buf); return msg; From 58851eb9eb9c5c177cf299deb4c03efea00d2051 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Thu, 14 Mar 2024 15:29:54 +0100 Subject: [PATCH 117/127] Minor fix to output_test.py Do not use os.chdir() to change the current directory inside the run() function, as doing this prevents the temporary directory 
from being removed. Moreover, this breaks pytest invocations when adding new regression test scripts in this directory (as done in other forks). + Use dict.pop() to undefine environment variables in `default_env` dictionary. --- misc/output_test.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/misc/output_test.py b/misc/output_test.py index a09448255e..78848cbd4c 100755 --- a/misc/output_test.py +++ b/misc/output_test.py @@ -13,29 +13,26 @@ import unittest default_env = dict(os.environ) -if 'NINJA_STATUS' in default_env: - del default_env['NINJA_STATUS'] -if 'CLICOLOR_FORCE' in default_env: - del default_env['CLICOLOR_FORCE'] +default_env.pop('NINJA_STATUS', None) +default_env.pop('CLICOLOR_FORCE', None) default_env['TERM'] = '' NINJA_PATH = os.path.abspath('./ninja') def run(build_ninja, flags='', pipe=False, env=default_env): with tempfile.TemporaryDirectory() as d: - os.chdir(d) - with open('build.ninja', 'w') as f: + with open(os.path.join(d, 'build.ninja'), 'w') as f: f.write(build_ninja) f.flush() ninja_cmd = '{} {}'.format(NINJA_PATH, flags) try: if pipe: - output = subprocess.check_output([ninja_cmd], shell=True, env=env) + output = subprocess.check_output([ninja_cmd], shell=True, cwd=d, env=env) elif platform.system() == 'Darwin': output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd], - env=env) + cwd=d, env=env) else: output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'], - env=env) + cwd=d, env=env) except subprocess.CalledProcessError as err: sys.stdout.buffer.write(err.output) raise err From 42f7b0a5552cdac395918f323691c0867d367e41 Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Thu, 14 Mar 2024 16:05:07 +0100 Subject: [PATCH 118/127] graph.h: Use default initializers to remove -Worder warnings. 
A recent pull request modified the Edge class default constructor but placed a member initializer at the wrong location, creating annoying -Worder compiler warnings with recent Clang versions. This is a recurrent problem every time we modify the classes in this header, so get rid of the problem once and for all by using C++11 default initializers when defining the members, and simplifying the constructor. Do the same for the Node class. --- src/graph.h | 60 +++++++++++++++++++++-------------------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/src/graph.h b/src/graph.h index d49c3094de..b2657e2357 100644 --- a/src/graph.h +++ b/src/graph.h @@ -39,14 +39,7 @@ struct State; /// it's dirty, mtime, etc. struct Node { Node(const std::string& path, uint64_t slash_bits) - : path_(path), - slash_bits_(slash_bits), - mtime_(-1), - exists_(ExistenceStatusUnknown), - dirty_(false), - dyndep_pending_(false), - in_edge_(NULL), - id_(-1) {} + : path_(path), slash_bits_(slash_bits) {} /// Return false on error. bool Stat(DiskInterface* disk_interface, std::string* err); @@ -128,13 +121,13 @@ struct Node { /// Set bits starting from lowest for backslashes that were normalized to /// forward slashes by CanonicalizePath. See |PathDecanonicalized|. - uint64_t slash_bits_; + uint64_t slash_bits_ = 0; /// Possible values of mtime_: /// -1: file hasn't been examined /// 0: we looked, and file doesn't exist /// >0: actual file's mtime, or the latest mtime of its dependencies if it doesn't exist - TimeStamp mtime_; + TimeStamp mtime_ = -1; enum ExistenceStatus { /// The file hasn't been examined. @@ -144,16 +137,16 @@ struct Node { /// The path is an actual file. mtime_ will be the file's mtime. ExistenceStatusExists }; - ExistenceStatus exists_; + ExistenceStatus exists_ = ExistenceStatusUnknown; /// Dirty is true when the underlying file is out-of-date. /// But note that Edge::outputs_ready_ is also used in judging which /// edges to build. 
- bool dirty_; + bool dirty_ = false; /// Store whether dyndep information is expected from this node but /// has not yet been loaded. - bool dyndep_pending_; + bool dyndep_pending_ = false; /// Set to true when this node comes from a depfile, a dyndep file or the /// deps log. If it does not have a producing edge, the build should not @@ -164,7 +157,7 @@ struct Node { /// The Edge that produces this Node, or NULL when there is no /// known edge to produce it. - Edge* in_edge_; + Edge* in_edge_ = nullptr; /// All Edges that use this Node as an input. std::vector out_edges_; @@ -173,7 +166,7 @@ struct Node { std::vector validation_out_edges_; /// A dense integer id for the node, assigned and used by DepsLog. - int id_; + int id_ = -1; }; /// An edge in the dependency graph; links between Nodes using Rules. @@ -184,12 +177,7 @@ struct Edge { VisitDone }; - Edge() - : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone), - id_(0), outputs_ready_(false), deps_loaded_(false), - deps_missing_(false), generated_by_dep_loader_(false), - command_start_time_(0), implicit_deps_(0), order_only_deps_(0), - critical_path_weight_(-1), implicit_outs_(0) {} + Edge() = default; /// Return true if all inputs' in-edges are ready. 
bool AllInputsReady() const; @@ -224,21 +212,21 @@ struct Edge { critical_path_weight_ = critical_path_weight; } - const Rule* rule_; - Pool* pool_; + const Rule* rule_ = nullptr; + Pool* pool_ = nullptr; std::vector inputs_; std::vector outputs_; std::vector validations_; - Node* dyndep_; - BindingEnv* env_; - VisitMark mark_; - size_t id_; - int64_t critical_path_weight_; - bool outputs_ready_; - bool deps_loaded_; - bool deps_missing_; - bool generated_by_dep_loader_; - TimeStamp command_start_time_; + Node* dyndep_ = nullptr; + BindingEnv* env_ = nullptr; + VisitMark mark_ = VisitNone; + size_t id_ = 0; + int64_t critical_path_weight_ = -1; + bool outputs_ready_ = false; + bool deps_loaded_ = false; + bool deps_missing_ = false; + bool generated_by_dep_loader_ = false; + TimeStamp command_start_time_ = 0; const Rule& rule() const { return *rule_; } Pool* pool() const { return pool_; } @@ -253,8 +241,8 @@ struct Edge { // don't cause the target to rebuild. // These are stored in inputs_ in that order, and we keep counts of // #2 and #3 when we need to access the various subsets. - int implicit_deps_; - int order_only_deps_; + int implicit_deps_ = 0; + int order_only_deps_ = 0; bool is_implicit(size_t index) { return index >= inputs_.size() - order_only_deps_ - implicit_deps_ && !is_order_only(index); @@ -268,7 +256,7 @@ struct Edge { // 2) implicit outs, which the target generates but are not part of $out. // These are stored in outputs_ in that order, and we keep a count of // #2 to use when we need to access the various subsets. - int implicit_outs_; + int implicit_outs_ = 0; bool is_implicit_out(size_t index) const { return index >= outputs_.size() - implicit_outs_; } From 9792634a1762e74552ac14ffc51a81fa5a3f353e Mon Sep 17 00:00:00 2001 From: David 'Digit' Turner Date: Wed, 13 Mar 2024 16:26:47 +0100 Subject: [PATCH 119/127] Support empty depfiles. Some tools generate completely empty depfiles when there are no implicit inputs. 
This patch modifies the depfile parser to support them. Fixes #2357 --- src/depfile_parser.cc | 4 +++- src/depfile_parser.in.cc | 4 +++- src/depfile_parser_test.cc | 21 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc index 98fba2eda6..7ce7290614 100644 --- a/src/depfile_parser.cc +++ b/src/depfile_parser.cc @@ -54,6 +54,7 @@ bool DepfileParser::Parse(string* content, string* err) { bool have_target = false; bool parsing_targets = true; bool poisoned_input = false; + bool is_empty = true; while (in < end) { bool have_newline = false; // out: current output point (typically same as in, but can fall behind @@ -335,6 +336,7 @@ bool DepfileParser::Parse(string* content, string* err) { } if (len > 0) { + is_empty = false; StringPiece piece = StringPiece(filename, len); // If we've seen this as an input before, skip it. std::vector::iterator pos = std::find(ins_.begin(), ins_.end(), piece); @@ -363,7 +365,7 @@ bool DepfileParser::Parse(string* content, string* err) { poisoned_input = false; } } - if (!have_target) { + if (!have_target && !is_empty) { *err = "expected ':' in depfile"; return false; } diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc index 75ba98227b..4b5f5fe4c1 100644 --- a/src/depfile_parser.in.cc +++ b/src/depfile_parser.in.cc @@ -53,6 +53,7 @@ bool DepfileParser::Parse(string* content, string* err) { bool have_target = false; bool parsing_targets = true; bool poisoned_input = false; + bool is_empty = true; while (in < end) { bool have_newline = false; // out: current output point (typically same as in, but can fall behind @@ -171,6 +172,7 @@ bool DepfileParser::Parse(string* content, string* err) { } if (len > 0) { + is_empty = false; StringPiece piece = StringPiece(filename, len); // If we've seen this as an input before, skip it. 
std::vector::iterator pos = std::find(ins_.begin(), ins_.end(), piece); @@ -199,7 +201,7 @@ bool DepfileParser::Parse(string* content, string* err) { poisoned_input = false; } } - if (!have_target) { + if (!have_target && !is_empty) { *err = "expected ':' in depfile"; return false; } diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc index 8886258ebc..947ae764bc 100644 --- a/src/depfile_parser_test.cc +++ b/src/depfile_parser_test.cc @@ -378,3 +378,24 @@ TEST_F(DepfileParserTest, BuggyMP) { "z:\n", &err)); ASSERT_EQ("inputs may not also have inputs", err); } + +TEST_F(DepfileParserTest, EmptyFile) { + std::string err; + EXPECT_TRUE(Parse("", &err)); + ASSERT_EQ(0u, parser_.outs_.size()); + ASSERT_EQ(0u, parser_.ins_.size()); +} + +TEST_F(DepfileParserTest, EmptyLines) { + std::string err; + EXPECT_TRUE(Parse("\n\n", &err)); + ASSERT_EQ(0u, parser_.outs_.size()); + ASSERT_EQ(0u, parser_.ins_.size()); +} + +TEST_F(DepfileParserTest, MissingColon) { + // The file is not empty but is missing a colon separator. + std::string err; + EXPECT_FALSE(Parse("foo.o foo.c\n", &err)); + EXPECT_EQ("expected ':' in depfile", err); +} From e5413d3cc13a3a9de0b8ca6faadb813270687cba Mon Sep 17 00:00:00 2001 From: Pavel Boldin Date: Fri, 15 Mar 2024 15:21:16 +0000 Subject: [PATCH 120/127] CanonicalizePath: fix 'a/b/.._foo' -> 'a' replacement Signed-off-by: Pavel Boldin --- src/util.cc | 2 +- src/util_test.cc | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/util.cc b/src/util.cc index 5f67fcf403..b510a9bbf1 100644 --- a/src/util.cc +++ b/src/util.cc @@ -253,7 +253,7 @@ void CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits) { if (src[0] == '.') { if (component_len == 1) break; // Ignore trailing '.' (e.g. 'foo/.' -> 'foo/') - if (src[1] == '.') { + if (component_len == 2 && src[1] == '.') { // Handle '..'. Back up if possible. 
if (component_count > 0) { while (--dst > dst0 && !IsPathSeparator(dst[-1])) { diff --git a/src/util_test.cc b/src/util_test.cc index 8467e2a72b..d76954cce5 100644 --- a/src/util_test.cc +++ b/src/util_test.cc @@ -152,6 +152,10 @@ TEST(CanonicalizePath, PathSamples) { path = "foo/.."; CanonicalizePath(&path); EXPECT_EQ(".", path); + + path = "foo/.._bar"; + CanonicalizePath(&path); + EXPECT_EQ("foo/.._bar", path); } #ifdef _WIN32 From be64d2237d3ea5bb64fc3906797615ee90d8cacb Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Sat, 16 Mar 2024 18:05:44 +0100 Subject: [PATCH 121/127] GitHub Actions: Update Ubuntu versions to 20.04, 22.04 and 24.04 --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b08e831b62..fdebf0b342 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -133,7 +133,7 @@ jobs: image: ${{ matrix.image }} strategy: matrix: - image: ['ubuntu:14.04', 'ubuntu:16.04', 'ubuntu:18.04'] + image: ['ubuntu:20.04', 'ubuntu:22.04', 'ubuntu:24.04'] steps: - uses: actions/checkout@v2 - name: Install dependencies From 7712e30548059def780a5ba6f02a722ef57e1f5b Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Sat, 16 Mar 2024 18:08:57 +0100 Subject: [PATCH 122/127] AppVeyor: Update Ubuntu to 22.04 --- appveyor.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index ecc9f98315..505e1423af 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,7 +1,7 @@ version: 1.0.{build} image: - Visual Studio 2017 - - Ubuntu1804 + - Ubuntu2204 environment: CLICOLOR_FORCE: 1 @@ -14,7 +14,7 @@ matrix: exclude: - image: Visual Studio 2017 MSYSTEM: LINUX - - image: Ubuntu1804 + - image: Ubuntu2204 MSYSTEM: MINGW64 for: @@ -30,7 +30,7 @@ for: ./misc/ninja_syntax_test.py 2>&1\n\"@" - matrix: only: - - image: Ubuntu1804 + - image: Ubuntu2204 build_script: - ./configure.py --bootstrap - ./ninja all 
From 878aa468d144d854005a6d4f0c7785a5185f0bf9 Mon Sep 17 00:00:00 2001 From: von Heydebrand Julian Date: Wed, 13 Mar 2024 16:21:22 +0100 Subject: [PATCH 123/127] Gracefully handle outdated .ninja_log during '-t recompact' When we explicitly unlink the file we should return LOAD_NOT_FOUND instead of LOAD_SUCCESS --- misc/output_test.py | 23 +++++++++++++++++++++++ src/build_log.cc | 4 ++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/misc/output_test.py b/misc/output_test.py index 78848cbd4c..13b09269e0 100755 --- a/misc/output_test.py +++ b/misc/output_test.py @@ -127,6 +127,29 @@ def test_pr_1685(self): self.assertEqual(run('', flags='-t recompact'), '') self.assertEqual(run('', flags='-t restat'), '') + def test_issue_2048(self): + with tempfile.TemporaryDirectory() as d: + with open(os.path.join(d, 'build.ninja'), 'w'): + pass + + with open(os.path.join(d, '.ninja_log'), 'w') as f: + f.write('# ninja log v4\n') + + try: + output = subprocess.check_output([NINJA_PATH, '-t', 'recompact'], + cwd=d, + env=default_env, + stderr=subprocess.STDOUT, + text=True + ) + + self.assertEqual( + output.strip(), + "ninja: warning: build log version is too old; starting over" + ) + except subprocess.CalledProcessError as err: + self.fail("non-zero exit code with: " + err.output) + def test_status(self): self.assertEqual(run(''), 'ninja: no work to do.\n') self.assertEqual(run('', pipe=True), 'ninja: no work to do.\n') diff --git a/src/build_log.cc b/src/build_log.cc index cf2118251c..792d1a3e7e 100644 --- a/src/build_log.cc +++ b/src/build_log.cc @@ -291,9 +291,9 @@ LoadStatus BuildLog::Load(const string& path, string* err) { if (invalid_log_version) { fclose(file); unlink(path.c_str()); - // Don't report this as a failure. An empty build log will cause + // Don't report this as a failure. A missing build log will cause // us to rebuild the outputs anyway. 
- return LOAD_SUCCESS; + return LOAD_NOT_FOUND; } } From 003bb3b32f6f42c45f9eb51881a52ee2c370b522 Mon Sep 17 00:00:00 2001 From: Dylan Baker Date: Fri, 29 Mar 2024 13:32:37 -0700 Subject: [PATCH 124/127] cleandead: remove outputs specified by dyndep files Load this information before cleaning, as the normal `clean` operation does. This was accidentally missed when commit 714621db (Adding a way to clean dead build artifacts ..., 2018-04-27, v1.10.0~1^2~9^2~1) was rebased after the normal `clean` operation was updated by commit a3cbb4d (clean: remove outputs specified by dyndep files, 2019-02-12, v1.10.0~1^2~53^2~4). This fixes a bug causing artifacts to be spuriously flagged as "dead" and erased. --- src/clean.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/clean.cc b/src/clean.cc index 575bf6bd94..68273520b3 100644 --- a/src/clean.cc +++ b/src/clean.cc @@ -127,6 +127,7 @@ int Cleaner::CleanAll(bool generator) { int Cleaner::CleanDead(const BuildLog::Entries& entries) { Reset(); PrintHeader(); + LoadDyndeps(); for (BuildLog::Entries::const_iterator i = entries.begin(); i != entries.end(); ++i) { Node* n = state_->LookupNode(i->first); // Detecting stale outputs works as follows: From e3f44dd45625795e7b4b4b9877faff6ee9c07b5b Mon Sep 17 00:00:00 2001 From: Dylan Baker Date: Tue, 2 Apr 2024 09:52:11 -0700 Subject: [PATCH 125/127] clean: Improve performance in presence of dynamic dependencies Add code to check "pending" so as to avoid loading dyndep files that have already been loaded. This was missed by commit a3cbb4d (clean: remove outputs specified by dyndep files, 2019-02-12, v1.10.0~1^2~53^2~4). --- src/clean.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/clean.cc b/src/clean.cc index 68273520b3..ceffe64027 100644 --- a/src/clean.cc +++ b/src/clean.cc @@ -293,7 +293,8 @@ void Cleaner::LoadDyndeps() { // Load dyndep files that exist, before they are cleaned. 
for (vector::iterator e = state_->edges_.begin(); e != state_->edges_.end(); ++e) { - if (Node* dyndep = (*e)->dyndep_) { + Node* dyndep; + if ((dyndep = (*e)->dyndep_) && dyndep->dyndep_pending()) { // Capture and ignore errors loading the dyndep file. // We clean as much of the graph as we know. std::string err; From 9cabe6a3f3e69ea7ef3665c97e46d4f14433f7d8 Mon Sep 17 00:00:00 2001 From: Thaddeus Crews Date: Sat, 30 Mar 2024 14:35:18 -0500 Subject: [PATCH 126/127] Implement type hints in `ninja_syntax.py` --- misc/ninja_syntax.py | 74 +++++++++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 21 deletions(-) diff --git a/misc/ninja_syntax.py b/misc/ninja_syntax.py index ca73b5ba99..2aa8456e9d 100644 --- a/misc/ninja_syntax.py +++ b/misc/ninja_syntax.py @@ -23,37 +23,54 @@ import re import textwrap +from io import TextIOWrapper +from typing import Dict, List, Match, Optional, Tuple, Union -def escape_path(word): +def escape_path(word: str) -> str: return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:') class Writer(object): - def __init__(self, output, width=78): + def __init__(self, output: TextIOWrapper, width: int = 78) -> None: self.output = output self.width = width - def newline(self): + def newline(self) -> None: self.output.write('\n') - def comment(self, text): + def comment(self, text: str) -> None: for line in textwrap.wrap(text, self.width - 2, break_long_words=False, break_on_hyphens=False): self.output.write('# ' + line + '\n') - def variable(self, key, value, indent=0): + def variable( + self, + key: str, + value: Optional[Union[bool, int, float, str, List[str]]], + indent: int = 0, + ) -> None: if value is None: return if isinstance(value, list): value = ' '.join(filter(None, value)) # Filter out empty strings. 
self._line('%s = %s' % (key, value), indent) - def pool(self, name, depth): + def pool(self, name: str, depth: int) -> None: self._line('pool %s' % name) self.variable('depth', depth, indent=1) - def rule(self, name, command, description=None, depfile=None, - generator=False, pool=None, restat=False, rspfile=None, - rspfile_content=None, deps=None): + def rule( + self, + name: str, + command: str, + description: Optional[str] = None, + depfile: Optional[str] = None, + generator: bool = False, + pool: Optional[str] = None, + restat: bool = False, + rspfile: Optional[str] = None, + rspfile_content: Optional[str] = None, + deps: Optional[Union[str, List[str]]] = None, + ) -> None: self._line('rule %s' % name) self.variable('command', command, indent=1) if description: @@ -73,8 +90,23 @@ def rule(self, name, command, description=None, depfile=None, if deps: self.variable('deps', deps, indent=1) - def build(self, outputs, rule, inputs=None, implicit=None, order_only=None, - variables=None, implicit_outputs=None, pool=None, dyndep=None): + def build( + self, + outputs: Union[str, List[str]], + rule: str, + inputs: Optional[Union[str, List[str]]] = None, + implicit: Optional[Union[str, List[str]]] = None, + order_only: Optional[Union[str, List[str]]] = None, + variables: Optional[ + Union[ + List[Tuple[str, Optional[Union[str, List[str]]]]], + Dict[str, Optional[Union[str, List[str]]]], + ] + ] = None, + implicit_outputs: Optional[Union[str, List[str]]] = None, + pool: Optional[str] = None, + dyndep: Optional[str] = None, + ) -> List[str]: outputs = as_list(outputs) out_outputs = [escape_path(x) for x in outputs] all_inputs = [escape_path(x) for x in as_list(inputs)] @@ -111,16 +143,16 @@ def build(self, outputs, rule, inputs=None, implicit=None, order_only=None, return outputs - def include(self, path): + def include(self, path: str) -> None: self._line('include %s' % path) - def subninja(self, path): + def subninja(self, path: str) -> None: self._line('subninja %s' % 
path) - def default(self, paths): + def default(self, paths: Union[str, List[str]]) -> None: self._line('default %s' % ' '.join(as_list(paths))) - def _count_dollars_before_index(self, s, i): + def _count_dollars_before_index(self, s: str, i: int) -> int: """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 @@ -129,7 +161,7 @@ def _count_dollars_before_index(self, s, i): dollar_index -= 1 return dollar_count - def _line(self, text, indent=0): + def _line(self, text: str, indent: int = 0) -> None: """Write 'text' word-wrapped at self.width characters.""" leading_space = ' ' * indent while len(leading_space) + len(text) > self.width: @@ -165,11 +197,11 @@ def _line(self, text, indent=0): self.output.write(leading_space + text + '\n') - def close(self): + def close(self) -> None: self.output.close() -def as_list(input): +def as_list(input: Optional[Union[str, List[str]]]) -> List[str]: if input is None: return [] if isinstance(input, list): @@ -177,7 +209,7 @@ def as_list(input): return [input] -def escape(string): +def escape(string: str) -> str: """Escape a string such that it can be embedded into a Ninja file without further interpretation.""" assert '\n' not in string, 'Ninja syntax does not allow newlines' @@ -185,13 +217,13 @@ def escape(string): return string.replace('$', '$$') -def expand(string, vars, local_vars={}): +def expand(string: str, vars: Dict[str, str], local_vars: Dict[str, str] = {}) -> str: """Expand a string containing $vars as Ninja would. Note: doesn't handle the full Ninja variable syntax, but it's enough to make configure.py's use of it work. 
""" - def exp(m): + def exp(m: Match[str]) -> str: var = m.group(1) if var == '$': return '$' From 1dcebc6399dc76a9bdf643ad9722d7f2d7fee51c Mon Sep 17 00:00:00 2001 From: Jan Niklas Hasse Date: Thu, 11 Apr 2024 18:42:06 +0200 Subject: [PATCH 127/127] mark this 1.13.0.git --- src/version.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.cc b/src/version.cc index d3069579f7..f97b77d568 100644 --- a/src/version.cc +++ b/src/version.cc @@ -20,7 +20,7 @@ using namespace std; -const char* kNinjaVersion = "1.12.0.git"; +const char* kNinjaVersion = "1.13.0.git"; void ParseVersion(const string& version, int* major, int* minor) { size_t end = version.find('.');