
AK/Lagom: Modify TestSuite to return how many tests failed from main

This allows us to remove the FAIL_REGEX logic from the CTest invocation
of AK and LibRegex tests, as they will return a non-zero exit code on
failure :^).

This also means that running a failing TestSuite-enabled test with the
run-test-and-shutdown script will actually print that the test failed.
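
As a rough illustration (not part of this commit; the suite and test case names below are made up), a minimal TestSuite-style test binary now exits with the number of failed cases, which is what lets CTest and run-test-and-shutdown detect failure from the exit code instead of grepping the output for "FAIL":

    #include <AK/TestSuite.h>

    TEST_CASE(addition)
    {
        EXPECT_EQ(1 + 1, 2); // passes, contributes nothing to the failure count
    }

    TEST_CASE(deliberately_broken)
    {
        // EXPECT_EQ now also calls current_test_case_did_fail(), so this case is
        // counted and the process exits with status 1 via the updated TEST_MAIN.
        EXPECT_EQ(2 + 2, 5);
    }

    TEST_MAIN(Example)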
Andrew Kaster 2021-02-28 12:47:48 -07:00 committed by Andreas Kling
parent 5b1edc0678
commit 669b6c43aa
2 changed files with 43 additions and 28 deletions

Changed file 1 of 2: AK/TestSuite.h

@@ -34,6 +34,10 @@ namespace AK {
 template<typename... Parameters>
 void warnln(CheckedFormatString<Parameters...>&& fmtstr, const Parameters&...);
 
+// Declare a helper so that we can call it from VERIFY in included headers
+// before defining TestSuite
+inline void current_test_case_did_fail();
+
 }
 
 using AK::warnln;
@@ -41,8 +45,10 @@ using AK::warnln;
 #undef VERIFY
 #define VERIFY(x) \
     do { \
-        if (!(x)) \
+        if (!(x)) { \
             ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: VERIFY({}) failed", __FILE__, __LINE__, #x); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 #undef VERIFY_NOT_REACHED
@@ -127,23 +133,28 @@ public:
         s_global = nullptr;
     }
 
-    void run(const NonnullRefPtrVector<TestCase>&);
-    void main(const String& suite_name, int argc, char** argv);
+    int run(const NonnullRefPtrVector<TestCase>&);
+    int main(const String& suite_name, int argc, char** argv);
 
     NonnullRefPtrVector<TestCase> find_cases(const String& search, bool find_tests, bool find_benchmarks);
 
     void add_case(const NonnullRefPtr<TestCase>& test_case)
     {
         m_cases.append(test_case);
     }
 
+    void current_test_case_did_fail() { m_current_test_case_passed = false; }
+
 private:
     static TestSuite* s_global;
     NonnullRefPtrVector<TestCase> m_cases;
     u64 m_testtime = 0;
     u64 m_benchtime = 0;
     String m_suite_name;
+    bool m_current_test_case_passed = true;
 };
 
-void TestSuite::main(const String& suite_name, int argc, char** argv)
+inline void current_test_case_did_fail() { TestSuite::the().current_test_case_did_fail(); }
+
+int TestSuite::main(const String& suite_name, int argc, char** argv)
 {
     m_suite_name = suite_name;
@@ -167,11 +178,12 @@ void TestSuite::main(const String& suite_name, int argc, char** argv)
         for (const auto& test : matching_tests) {
             outln("    {}", test.name());
         }
-    } else {
-        outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
-        run(matching_tests);
+        return 0;
     }
+
+    outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
+
+    return run(matching_tests);
 }
 
 NonnullRefPtrVector<TestCase> TestSuite::find_cases(const String& search, bool find_tests, bool find_benchmarks)
@@ -194,9 +206,10 @@ NonnullRefPtrVector<TestCase> TestSuite::find_cases(const String& search, bool f
     return matches;
 }
 
-void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
+int TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
 {
     size_t test_count = 0;
+    size_t test_failed_count = 0;
     size_t benchmark_count = 0;
 
     TestElapsedTimer global_timer;
@@ -204,12 +217,13 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
         const auto test_type = t.is_benchmark() ? "benchmark" : "test";
 
         warnln("Running {} '{}'.", test_type, t.name());
+        m_current_test_case_passed = true;
 
         TestElapsedTimer timer;
         t.func()();
         const auto time = timer.elapsed_milliseconds();
 
-        dbgln("Completed {} '{}' in {}ms", test_type, t.name(), time);
+        dbgln("{} {} '{}' in {}ms", m_current_test_case_passed ? "Completed" : "Failed", test_type, t.name(), time);
 
         if (t.is_benchmark()) {
             m_benchtime += time;
@@ -218,6 +232,10 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
             m_testtime += time;
             test_count++;
         }
+
+        if (!m_current_test_case_passed) {
+            test_failed_count++;
+        }
     }
 
     dbgln("Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other).",
@@ -227,10 +245,14 @@ void TestSuite::run(const NonnullRefPtrVector<TestCase>& tests)
         m_testtime,
         m_benchtime,
         global_timer.elapsed_milliseconds() - (m_testtime + m_benchtime));
+
+    dbgln("Out of {} tests, {} passed and {} failed.", test_count, test_count - test_failed_count, test_failed_count);
+
+    return (int)test_failed_count;
 }
 
 }
 
+using AK::current_test_case_did_fail;
 using AK::TestCase;
 using AK::TestSuite;
@@ -268,16 +290,19 @@ using AK::TestSuite;
 int main(int argc, char** argv) \
 { \
     static_assert(compiletime_lenof(#x) != 0, "Set SuiteName"); \
-    TestSuite::the().main(#x, argc, argv); \
+    int ret = TestSuite::the().main(#x, argc, argv); \
     TestSuite::release(); \
+    return ret; \
 }
 
 #define EXPECT_EQ(a, b) \
     do { \
         auto lhs = (a); \
         auto rhs = (b); \
-        if (lhs != rhs) \
+        if (lhs != rhs) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs }); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 // If you're stuck and `EXPECT_EQ` seems to refuse to print anything useful,
@@ -286,12 +311,16 @@ using AK::TestSuite;
     do { \
         auto lhs = (a); \
         auto rhs = (b); \
-        if (lhs != rhs) \
+        if (lhs != rhs) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, lhs, rhs); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
 
 #define EXPECT(x) \
     do { \
-        if (!(x)) \
+        if (!(x)) { \
             warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT({}) failed", __FILE__, __LINE__, #x); \
+            current_test_case_did_fail(); \
+        } \
     } while (false)
Changed file 2 of 2: Meta/Lagom/CMakeLists.txt

@@ -177,13 +177,6 @@ if (BUILD_LAGOM)
             # FIXME: Only TestJSON needs this property
             WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../AK/Tests
         )
-        set_tests_properties(
-            ${name}_lagom
-            PROPERTIES
-            FAIL_REGULAR_EXPRESSION
-            "FAIL"
-        )
     endforeach()
 
     foreach(source ${LIBREGEX_TESTS})
@@ -195,13 +188,6 @@ if (BUILD_LAGOM)
             COMMAND ${name}_lagom
             WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
         )
-        set_tests_properties(
-            ${name}_lagom
-            PROPERTIES
-            FAIL_REGULAR_EXPRESSION
-            "FAIL"
-        )
     endforeach()
     endif()
 endif()