1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 19:27:44 +00:00

LibJS: Fix a bunch of unwind related errors in GenerateCFG

We were missing some unwind related control flow paths, and followed
some in improper ways, leading us to access a dead unwind frame in some
cases, as well as generating a technically wrong CFG.

This reorders the ways EnterUnwindContext works and alleviates those
errors.
This commit is contained in:
Hendiadyoin1 2023-03-05 19:04:28 +01:00 committed by Linus Groh
parent d38a3ca9eb
commit 170f732000

View file

@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/TemporaryChange.h>
#include <LibJS/Bytecode/PassManager.h>
namespace JS::Bytecode::Passes {
@@ -11,26 +12,34 @@ namespace JS::Bytecode::Passes {
// One entry on the unwind_frames stack: bookkeeping for the innermost
// active EnterUnwindContext (a try/catch/finally) while the CFG is built.
struct UnwindFrame {
    // Catch-handler block, or nullptr when the context has no handler.
    BasicBlock const* handler;
    // Finalizer (finally) block, or nullptr when the context has none.
    BasicBlock const* finalizer;
    // Blocks that control flow may continue to after the finalizer runs
    // (recorded by ScheduleJump); a nullptr entry signals a plain `return`,
    // which gets no special CFG handling.
    Vector<BasicBlock const*> finalizer_targets;
};
// Blocks whose CFG edges have already been generated; checked before
// recursing so each block is visited at most once.
static HashTable<BasicBlock const*> seen_blocks;
// Stack of active unwind contexts, innermost last. The frames themselves
// live on the C++ stack of the recursive walk ("we keep the frame alive
// here on the stack"), so entries dangle once their scope exits —
// hence the careful save/restore around handler and finalizer traversal.
static Vector<UnwindFrame*> unwind_frames;
// Returns the block an abrupt completion would transfer to next:
// the innermost frame's handler when it has one, otherwise that frame's
// finalizer (which may itself be null when the frame has neither).
static BasicBlock const* next_handler_or_finalizer()
{
    auto const* innermost_frame = unwind_frames.last();
    if (innermost_frame->handler != nullptr)
        return innermost_frame->handler;
    return innermost_frame->finalizer;
}
static void generate_cfg_for_block(BasicBlock const& current_block, PassPipelineExecutable executable)
{
seen_blocks.set(&current_block);
auto enter_label = [&](Label const& label, auto& entering_block) {
auto enter_label = [&](Label const& label, BasicBlock const& entering_block) {
executable.cfg->ensure(&entering_block).set(&label.block());
executable.inverted_cfg->ensure(&label.block()).set(&entering_block);
// The finalizer of an unwind context is handled separately
if (!seen_blocks.contains(&label.block()) && unwind_frames.last()->finalizer != &current_block)
// The finalizers and handlers of an unwind context are handled separately
if (!seen_blocks.contains(&label.block())
&& &label.block() != unwind_frames.last()->handler
&& &label.block() != unwind_frames.last()->finalizer)
generate_cfg_for_block(label.block(), executable);
};
if (auto const* handler = unwind_frames.last()->handler)
enter_label(Label { *handler }, current_block);
if (auto const* block = next_handler_or_finalizer())
enter_label(Label { *block }, current_block);
for (InstructionStreamIterator it { current_block.instruction_stream() }; !it.at_end(); ++it) {
auto const& instruction = *it;
@@ -39,6 +48,9 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
if (unwind_frames.last()->finalizer && unwind_frames.last()->finalizer != &current_block)
dbgln("FIXME: Popping finalizer from the unwind context from outside the finalizer");
unwind_frames.take_last();
if (auto const* block = next_handler_or_finalizer())
enter_label(Label { *block }, current_block);
}
if (!instruction.is_terminator())
@@ -54,15 +66,16 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
case JumpConditional:
case JumpNullish:
case JumpUndefined: {
// FIXME: Can we avoid this copy
// FIXME: It would be nice if we could avoid this copy, if we know that the unwind context stays the same in both paths
// Or with a COW capable Vector alternative
// Note: We might partially unwind here, so we need to make a copy of
// the current context to assure that the falsy code path has the same one
auto saved_context = unwind_frames;
{
TemporaryChange saved_context { unwind_frames, unwind_frames };
auto true_target = *static_cast<Op::Jump const&>(instruction).true_target();
enter_label(true_target, current_block);
unwind_frames = move(saved_context);
auto true_target = *static_cast<Op::Jump const&>(instruction).true_target();
enter_label(true_target, current_block);
}
auto false_target = *static_cast<Op::Jump const&>(instruction).false_target();
enter_label(false_target, current_block);
@@ -73,8 +86,9 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
if (continuation.has_value()) {
executable.exported_blocks->set(&continuation->block());
enter_label(*continuation, current_block);
} else if (auto const* finalizer = unwind_frames.is_empty() ? nullptr : unwind_frames.last()->finalizer) {
} else if (auto const* finalizer = unwind_frames.last()->finalizer) {
enter_label(Label { *finalizer }, current_block);
unwind_frames.last()->finalizer_targets.append(nullptr);
}
return;
}
@@ -84,33 +98,46 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
auto finalizer_target = static_cast<Op::EnterUnwindContext const&>(instruction).finalizer_target();
// We keep the frame alive here on the stack, to save some allocation size
UnwindFrame frame { nullptr, finalizer_target.has_value() ? &finalizer_target->block() : nullptr };
UnwindFrame frame {
.handler = handler_target.has_value() ? &handler_target->block() : nullptr,
.finalizer = finalizer_target.has_value() ? &finalizer_target->block() : nullptr,
.finalizer_targets = {}
};
unwind_frames.append(&frame);
if (handler_target.has_value()) {
// We might pop from this context while in the handler, so we
// need to save it and return it to its rightful place
// FIXME: We can relax this when we are stricter about entering finally statements
auto saved_context = unwind_frames;
enter_label(*handler_target, current_block);
unwind_frames = move(saved_context);
frame.handler = &handler_target->block();
}
{
// We might pop from this context while in the handler, so we
// need to save it and return it to its rightful place
// FIXME: We can relax this when we are stricter about entering finally statements
auto saved_context = unwind_frames;
// This will enter the handler and finalizer when needed.
TemporaryChange saved_context { unwind_frames, unwind_frames };
enter_label(entry_point, current_block);
unwind_frames = move(saved_context);
}
frame.handler = nullptr;
if (handler_target.has_value()) {
// We manually generate the CFG, because we previously skipped it
TemporaryChange saved_context { unwind_frames, unwind_frames };
generate_cfg_for_block(handler_target->block(), executable);
}
if (finalizer_target.has_value()) {
auto saved_context = unwind_frames;
// We manually generate the CFG, because we previously halted before entering it
generate_cfg_for_block(finalizer_target->block(), executable);
VERIFY(unwind_frames.last() != &frame);
enter_label(*finalizer_target, current_block);
// We previously halted execution when we would enter the finalizer,
// So we now have to visit all possible targets
// This mainly affects the ScheduleJump instruction
for (auto const* block : frame.finalizer_targets) {
if (block == nullptr) {
// This signals a `return`, which we do not handle specially, so we skip
continue;
}
if (!seen_blocks.contains(block))
generate_cfg_for_block(*block, executable);
}
} else {
VERIFY(unwind_frames.last() == &frame);
unwind_frames.take_last();
VERIFY(frame.finalizer_targets.is_empty());
}
return;
@@ -118,6 +145,13 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
case ContinuePendingUnwind: {
auto resume_target = static_cast<Op::ContinuePendingUnwind const&>(instruction).resume_target();
enter_label(resume_target, current_block);
// Note: We already mark these possible control flow changes further up, but when we
// get better error awareness, being explicit here will be required
if (auto const* handler = unwind_frames.last()->handler)
enter_label(Label { *handler }, current_block);
else if (auto const* finalizer = unwind_frames.last()->finalizer)
enter_label(Label { *finalizer }, current_block);
return;
}
case Throw:
@@ -125,19 +159,27 @@ static void generate_cfg_for_block(BasicBlock const& current_block, PassPipeline
// but let's be correct and mark it again,
// this will be useful once we have more info on which instruction can
// actually fail
if (auto const* handler = unwind_frames.last()->finalizer)
if (auto const* handler = unwind_frames.last()->handler) {
enter_label(Label { *handler }, current_block);
else if (auto const* finalizer = unwind_frames.last()->finalizer)
} else if (auto const* finalizer = unwind_frames.last()->finalizer) {
enter_label(Label { *finalizer }, current_block);
// Note: This error might bubble through the finalizer to the next handler/finalizer,
// This is currently marked in the general path
}
return;
case Return:
if (auto const* finalizer = unwind_frames.last()->finalizer)
if (auto const* finalizer = unwind_frames.last()->finalizer) {
enter_label(Label { *finalizer }, current_block);
unwind_frames.last()->finalizer_targets.append(nullptr);
}
return;
case ScheduleJump:
case ScheduleJump: {
enter_label(Label { *unwind_frames.last()->finalizer }, current_block);
enter_label(static_cast<Op::ScheduleJump const&>(instruction).target(), *unwind_frames.last()->finalizer);
unwind_frames.last()->finalizer_targets.append(
&static_cast<Op::ScheduleJump const&>(instruction).target().block());
return;
}
default:
dbgln("Unhandled terminator instruction: `{}`", instruction.to_deprecated_string(executable.executable));
VERIFY_NOT_REACHED();