mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 08:27:46 +00:00
LibTest: Add ability to turn test failure reporting on/off
This will be very useful as we add the randomized test cases and their two loops ("generate+test many times" and "shrink once failure is found"), because without this failure reporting we'd get many FAIL error messages while still searching for the minimal one. So, inside randomized test cases we want to only turn the error reporting on for one last time after all the generating and shrinking.
This commit is contained in:
parent
99e2d42a53
commit
1bcfead020
3 changed files with 90 additions and 48 deletions
|
@ -26,59 +26,71 @@ TestResult current_test_result();
|
|||
|
||||
Randomized::RandomnessSource& randomness_source();
|
||||
void set_randomness_source(Randomized::RandomnessSource);
|
||||
|
||||
bool is_reporting_enabled();
|
||||
void enable_reporting();
|
||||
void disable_reporting();
|
||||
}
|
||||
|
||||
#define EXPECT_EQ(a, b) \
|
||||
do { \
|
||||
auto lhs = (a); \
|
||||
auto rhs = (b); \
|
||||
if (lhs != rhs) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs }); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
// Evaluates (a) and (b) exactly once each and compares them with operator!=.
// On mismatch, prints a FAIL diagnostic (only while reporting is enabled, so
// randomized shrink loops stay quiet) and marks the current test as failed.
#define EXPECT_EQ(a, b)                                                                                       \
    do {                                                                                                      \
        auto lhs = (a);                                                                                       \
        auto rhs = (b);                                                                                       \
        if (lhs != rhs) {                                                                                     \
            if (::Test::is_reporting_enabled()) {                                                             \
                ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", \
                    __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs });        \
            }                                                                                                 \
            ::Test::set_current_test_result(::Test::TestResult::Failed);                                      \
        }                                                                                                     \
    } while (false)
|
||||
|
||||
#define EXPECT_EQ_TRUTH(a, b) \
|
||||
do { \
|
||||
auto lhs = (a); \
|
||||
auto rhs = (b); \
|
||||
bool ltruth = static_cast<bool>(lhs); \
|
||||
bool rtruth = static_cast<bool>(rhs); \
|
||||
if (ltruth != rtruth) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ_TRUTH({}, {}) failed with lhs={} ({}) and rhs={} ({})", \
|
||||
__FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, ltruth, FormatIfSupported { rhs }, rtruth); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
// Evaluates (a) and (b) exactly once each and compares their truthiness
// (static_cast<bool>). On mismatch, prints both values and their bool
// conversions (only while reporting is enabled) and fails the current test.
#define EXPECT_EQ_TRUTH(a, b)                                                                                                 \
    do {                                                                                                                      \
        auto lhs = (a);                                                                                                       \
        auto rhs = (b);                                                                                                       \
        bool lhs_truth = static_cast<bool>(lhs);                                                                              \
        bool rhs_truth = static_cast<bool>(rhs);                                                                              \
        if (lhs_truth != rhs_truth) {                                                                                         \
            if (::Test::is_reporting_enabled()) {                                                                             \
                ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ_TRUTH({}, {}) failed with lhs={} ({}) and rhs={} ({})", \
                    __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, lhs_truth, FormatIfSupported { rhs }, rhs_truth);  \
            }                                                                                                                 \
            ::Test::set_current_test_result(::Test::TestResult::Failed);                                                      \
        }                                                                                                                     \
    } while (false)
|
||||
|
||||
// If you're stuck and `EXPECT_EQ` seems to refuse to print anything useful,
|
||||
// try this: It'll spit out a nice compiler error telling you why it doesn't print.
|
||||
#define EXPECT_EQ_FORCE(a, b) \
|
||||
do { \
|
||||
auto lhs = (a); \
|
||||
auto rhs = (b); \
|
||||
if (lhs != rhs) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, lhs, rhs); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
// Like EXPECT_EQ, but formats lhs/rhs directly instead of wrapping them in
// FormatIfSupported: if a value has no Formatter, this produces a compiler
// error explaining exactly why EXPECT_EQ could not print it.
// Fixed: the diagnostic previously read "EXPECT_EQ(...)", misattributing the
// failing macro; it now correctly names EXPECT_EQ_FORCE.
#define EXPECT_EQ_FORCE(a, b)                                                                                       \
    do {                                                                                                            \
        auto lhs = (a);                                                                                             \
        auto rhs = (b);                                                                                             \
        if (lhs != rhs) {                                                                                           \
            if (::Test::is_reporting_enabled())                                                                     \
                ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_EQ_FORCE({}, {}) failed with lhs={} and rhs={}", \
                    __FILE__, __LINE__, #a, #b, lhs, rhs);                                                          \
            ::Test::set_current_test_result(::Test::TestResult::Failed);                                            \
        }                                                                                                           \
    } while (false)
|
||||
|
||||
#define EXPECT_NE(a, b) \
|
||||
do { \
|
||||
auto lhs = (a); \
|
||||
auto rhs = (b); \
|
||||
if (lhs == rhs) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_NE({}, {}) failed with lhs={} and rhs={}", __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs }); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
// Evaluates (a) and (b) exactly once each and fails the current test when the
// two values compare equal; the diagnostic is printed only while reporting is
// enabled (kept quiet during randomized generate/shrink loops).
#define EXPECT_NE(a, b)                                                                                       \
    do {                                                                                                      \
        auto lhs = (a);                                                                                       \
        auto rhs = (b);                                                                                       \
        if (lhs == rhs) {                                                                                     \
            if (::Test::is_reporting_enabled()) {                                                             \
                ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_NE({}, {}) failed with lhs={} and rhs={}", \
                    __FILE__, __LINE__, #a, #b, FormatIfSupported { lhs }, FormatIfSupported { rhs });        \
            }                                                                                                 \
            ::Test::set_current_test_result(::Test::TestResult::Failed);                                      \
        }                                                                                                     \
    } while (false)
|
||||
|
||||
#define EXPECT(x) \
|
||||
do { \
|
||||
if (!(x)) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT({}) failed", __FILE__, __LINE__, #x); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
// Evaluates (x) exactly once; when it is falsy, prints the stringified
// expression (only while reporting is enabled) and fails the current test.
#define EXPECT(x)                                                                                        \
    do {                                                                                                 \
        if (!(x)) {                                                                                      \
            if (::Test::is_reporting_enabled()) {                                                        \
                ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT({}) failed", __FILE__, __LINE__, #x); \
            }                                                                                            \
            ::Test::set_current_test_result(::Test::TestResult::Failed);                                 \
        }                                                                                                \
    } while (false)
|
||||
|
||||
#define EXPECT_APPROXIMATE_WITH_ERROR(a, b, err) \
|
||||
|
@ -87,19 +99,21 @@ void set_randomness_source(Randomized::RandomnessSource);
|
|||
auto expect_close_rhs = b; \
|
||||
auto expect_close_diff = static_cast<double>(expect_close_lhs) - static_cast<double>(expect_close_rhs); \
|
||||
if (AK::fabs(expect_close_diff) > (err)) { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_APPROXIMATE({}, {})" \
|
||||
" failed with lhs={}, rhs={}, (lhs-rhs)={}", \
|
||||
__FILE__, __LINE__, #a, #b, expect_close_lhs, expect_close_rhs, expect_close_diff); \
|
||||
if (::Test::is_reporting_enabled()) \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: EXPECT_APPROXIMATE({}, {})" \
|
||||
" failed with lhs={}, rhs={}, (lhs-rhs)={}", \
|
||||
__FILE__, __LINE__, #a, #b, expect_close_lhs, expect_close_rhs, expect_close_diff); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
} \
|
||||
} while (false)
|
||||
|
||||
// Convenience wrapper around EXPECT_APPROXIMATE_WITH_ERROR with a default
// absolute tolerance of 5e-7 on the (double) difference lhs - rhs.
#define EXPECT_APPROXIMATE(a, b) EXPECT_APPROXIMATE_WITH_ERROR(a, b, 0.0000005)
|
||||
|
||||
#define FAIL(message) \
|
||||
do { \
|
||||
::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: {}", __FILE__, __LINE__, message); \
|
||||
::Test::set_current_test_result(::Test::TestResult::Failed); \
|
||||
// Unconditionally fails the current test with the given message; the message
// is printed only while reporting is enabled, but the test result is set to
// Failed either way.
#define FAIL(message)                                                                          \
    do {                                                                                       \
        if (::Test::is_reporting_enabled()) {                                                  \
            ::AK::warnln("\033[31;1mFAIL\033[0m: {}:{}: {}", __FILE__, __LINE__, message);     \
        }                                                                                      \
        ::Test::set_current_test_result(::Test::TestResult::Failed);                           \
    } while (false)
|
||||
|
||||
// To use, specify the lambda to execute in a sub process and verify it exits:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue