
LibJS/JIT: Support alternative entry point blocks

If Interpreter::run_and_return_frame is called with a specific entry
point, we now map that to a native instruction address, which the JIT
code jumps to after the function prologue.
Simon Wanner 2023-11-02 21:52:20 +01:00 committed by Andreas Kling
parent 38f3b78a1d
commit e400682fb1
4 changed files with 46 additions and 5 deletions
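
In short: Interpreter::run_and_return_frame() translates the requested BasicBlock into an index, NativeExecutable::run() translates that index into a native address, and the JIT prologue jumps to that address instead of falling through to the first block. A minimal standalone sketch of the underlying technique (plain C++, not SerenityOS code; the switch stands in for the emitted indirect jump):

#include <cstddef>
#include <cstdio>

// Stand-in for the JIT-compiled function: the block index plays the role of the
// native entry address, and the switch plays the role of the prologue's jump.
static int run_blocks(size_t entry_block)
{
    int acc = 0;
    switch (entry_block) {
    case 0:
        acc += 1; // block 0
        [[fallthrough]];
    case 1:
        acc += 10; // block 1
        [[fallthrough]];
    case 2:
        acc += 100; // block 2
    }
    return acc;
}

int main()
{
    printf("%d\n", run_blocks(0)); // 111: normal entry, executes every block
    printf("%d\n", run_blocks(2)); // 100: alternative entry point, skips blocks 0 and 1
}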


@@ -367,7 +367,10 @@ Interpreter::ValueAndFrame Interpreter::run_and_return_frame(Executable& executable
     vm().execution_context_stack().last()->executable = &executable;
 
     if (auto native_executable = executable.get_or_create_native_executable()) {
-        native_executable->run(vm());
+        auto block_index = 0;
+        if (entry_point)
+            block_index = executable.basic_blocks.find_first_index_if([&](auto const& block) { return block.ptr() == entry_point; }).value();
+        native_executable->run(vm(), block_index);
 
 #if 0
         for (size_t i = 0; i < vm().running_execution_context().local_variables.size(); ++i) {
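
Here entry_point is a BasicBlock pointer, and find_first_index_if() resolves it to the block's position in basic_blocks, so only a plain index has to cross the NativeExecutable::run() boundary. Conceptually (a standard-C++ stand-in, not the AK API used above):

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct BasicBlock { /* ... */ };

// Translate a block pointer into its index within the executable's block list.
static size_t block_index_of(std::vector<std::unique_ptr<BasicBlock>> const& blocks, BasicBlock const* entry_point)
{
    for (size_t i = 0; i < blocks.size(); ++i) {
        if (blocks[i].get() == entry_point)
            return i;
    }
    assert(false && "entry point must belong to this executable");
    return 0;
}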


@@ -1736,11 +1736,32 @@ OwnPtr<NativeExecutable> Compiler::compile(Bytecode::Executable& bytecode_executable)
     compiler.reload_cached_accumulator();
 
+    Assembler::Label normal_entry {};
+
+    compiler.m_assembler.jump_if(
+        Assembler::Operand::Register(ARG3),
+        Assembler::Condition::EqualTo,
+        Assembler::Operand::Imm(0),
+        normal_entry);
+
+    compiler.m_assembler.jump(Assembler::Operand::Register(ARG3));
+
+    normal_entry.link(compiler.m_assembler);
+
     for (size_t block_index = 0; block_index < bytecode_executable.basic_blocks.size(); block_index++) {
         auto& block = bytecode_executable.basic_blocks[block_index];
         compiler.block_data_for(*block).start_offset = compiler.m_output.size();
         compiler.set_current_block(*block);
         auto it = Bytecode::InstructionStreamIterator(block->instruction_stream());
 
+        if (it.at_end()) {
+            mapping.append({
+                .native_offset = compiler.m_output.size(),
+                .block_index = block_index,
+                .bytecode_offset = 0,
+            });
+        }
+
         while (!it.at_end()) {
             auto const& op = *it;
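
Two things are going on in this hunk. The prologue now dispatches on ARG3: zero falls through to the first compiled block (normal_entry), while any other value is treated as a native address and jumped to directly. Separately, blocks with no instructions still get a mapping entry at bytecode_offset 0, so every block index has an entry-point address recorded for it (see the NativeExecutable constructor below). A rough C++ rendering of what the emitted prologue does (illustrative only; the real prologue is machine code produced by the assembler calls above):

#include <cstdint>

using BlockCode = void (*)();

// entry_point_address arrives in the register the assembler calls ARG3.
static void prologue(uintptr_t entry_point_address)
{
    if (entry_point_address == 0)
        return; // "normal_entry": execution falls through into the first compiled block
    reinterpret_cast<BlockCode>(entry_point_address)(); // "jump ARG3": indirect jump to the requested block
}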


@@ -19,6 +19,15 @@ NativeExecutable::NativeExecutable(void* code, size_t size, Vector<BytecodeMapping> mapping)
     , m_size(size)
     , m_mapping(move(mapping))
 {
+    // Translate block index to instruction address, so the native code can just jump to it.
+    for (auto const& entry : m_mapping) {
+        if (entry.block_index == BytecodeMapping::EXECUTABLE)
+            continue;
+        if (entry.bytecode_offset == 0) {
+            VERIFY(entry.block_index == m_block_entry_points.size());
+            m_block_entry_points.append(bit_cast<FlatPtr>(m_code) + entry.native_offset);
+        }
+    }
 }
 
 NativeExecutable::~NativeExecutable()
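
The constructor walks the bytecode-to-native mapping once and records, per block, the native address of its first instruction; this is what the compiler's extra mapping entry for empty blocks guarantees, and it is why the VERIFY can insist the block indices arrive in order. A standalone sketch of the same walk (field names follow the diff; the struct layout and the EXECUTABLE marker value are assumptions):

#include <cstddef>
#include <cstdint>
#include <vector>

struct BytecodeMapping {
    size_t native_offset { 0 };
    size_t block_index { 0 };
    size_t bytecode_offset { 0 };
    static constexpr size_t EXECUTABLE = SIZE_MAX; // assumed marker for whole-executable entries
};

// entry_points[i] ends up holding the native address of block i's first instruction.
static std::vector<uintptr_t> build_block_entry_points(void const* code, std::vector<BytecodeMapping> const& mapping)
{
    std::vector<uintptr_t> entry_points;
    for (auto const& entry : mapping) {
        if (entry.block_index == BytecodeMapping::EXECUTABLE)
            continue;
        if (entry.bytecode_offset == 0)
            entry_points.push_back(reinterpret_cast<uintptr_t>(code) + entry.native_offset);
    }
    return entry_points;
}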
@@ -26,12 +35,19 @@ NativeExecutable::~NativeExecutable()
     munmap(m_code, m_size);
 }
 
-void NativeExecutable::run(VM& vm) const
+void NativeExecutable::run(VM& vm, size_t entry_point) const
 {
-    typedef void (*JITCode)(VM&, Value* registers, Value* locals);
+    FlatPtr entry_point_address = 0;
+    if (entry_point != 0) {
+        entry_point_address = m_block_entry_points[entry_point];
+        VERIFY(entry_point_address != 0);
+    }
+
+    typedef void (*JITCode)(VM&, Value* registers, Value* locals, FlatPtr entry_point_address);
     ((JITCode)m_code)(vm,
         vm.bytecode_interpreter().registers().data(),
-        vm.running_execution_context().local_variables.data());
+        vm.running_execution_context().local_variables.data(),
+        entry_point_address);
 }
 
 #if ARCH(X86_64)
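
run() now forwards a fourth argument through the JITCode cast: 0 means "start at the first block", anything else is the native address looked up in m_block_entry_points. On x86-64 System V the fourth integer argument arrives in rcx, which is presumably the register behind the assembler's ARG3. A minimal illustration of the call shape (FlatPtr rendered as uintptr_t; in the real code m_code points at generated machine code, not a C++ function):

#include <cstdint>

struct VM;
struct Value;

typedef void (*JITCode)(VM&, Value* registers, Value* locals, uintptr_t entry_point_address);

static void call_jit(void* code, VM& vm, Value* registers, Value* locals, uintptr_t entry_point_address)
{
    // The fourth parameter is the new part: the prologue reads it before any block runs.
    ((JITCode)code)(vm, registers, locals, entry_point_address);
}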


@@ -31,7 +31,7 @@ public:
     NativeExecutable(void* code, size_t size, Vector<BytecodeMapping>);
     ~NativeExecutable();
 
-    void run(VM&) const;
+    void run(VM&, size_t entry_point) const;
     void dump_disassembly(Bytecode::Executable const& executable) const;
     BytecodeMapping const& find_mapping_entry(size_t native_offset) const;
     Optional<UnrealizedSourceRange> get_source_range(Bytecode::Executable const& executable, FlatPtr address) const;
@@ -42,6 +42,7 @@ private:
     void* m_code { nullptr };
     size_t m_size { 0 };
     Vector<BytecodeMapping> m_mapping;
+    Vector<FlatPtr> m_block_entry_points;
 
     mutable OwnPtr<Bytecode::InstructionStreamIterator> m_instruction_stream_iterator;
 };