
LibJS/JIT: Add fast path for Add Int32, Int32

This uses 32-bit registers to perform the addition and bails out to the
slow path if the overflow flag (OF) is set.
Author: iliadsh, 2023-10-29 01:07:14 +00:00 (committed by Andreas Kling)
Parent: 4b23a7dfb4
Commit: 4f3945024a
3 changed files with 74 additions and 1 deletion
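
For readers skimming the diff below: when both operands are already Int32-tagged Values, the sum is computed with a plain 32-bit add, and a signed overflow sends execution to the existing C++ slow path (cxx_add), which handles promotion to a double. A minimal standalone sketch of that decision, using the GCC/Clang builtin __builtin_add_overflow in place of the emitted add32 + jump_if_overflow pair (the function name and return type are illustrative, not part of the commit):

// Standalone illustration of the fast-path decision: do a 32-bit add and bail
// on signed overflow, mirroring the emitted "add32 + jo" sequence.
#include <cstdint>
#include <optional>

std::optional<int32_t> try_int32_add(int32_t lhs, int32_t rhs)
{
    int32_t sum = 0;
    if (__builtin_add_overflow(lhs, rhs, &sum))
        return std::nullopt; // overflow: fall back to the generic cxx_add slow path
    return sum;              // still fits in Int32: re-tag and store in the accumulator
}

For example, try_int32_add(1, 2) stays on the fast path, while try_int32_add(INT32_MAX, 1) overflows and would be left to cxx_add, which returns the result as a double.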


@@ -398,6 +398,15 @@ struct Assembler {
        label.add_jump(*this, m_output.size());
    }

    void jump_if_overflow(Label& label)
    {
        // jo label (RIP-relative 32-bit offset)
        emit8(0x0f);
        emit8(0x80);
        emit32(0xdeadbeef);
        label.add_jump(*this, m_output.size());
    }
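
The jo instruction is emitted with 0xdeadbeef as a stand-in for its 32-bit displacement; Label::add_jump records the current output position so the displacement can be filled in once the label is linked (as slow_case.link() does in the compiler further down). A rough standalone sketch of that placeholder-and-patch idea, with hypothetical names (only the byte values mirror the assembler above):

// Emit "jo rel32" with a placeholder displacement, then patch it when the jump
// target becomes known. CodeBuffer, emit_jo_placeholder and link_label_here are
// illustrative names, not the assembler's API.
#include <cstdint>
#include <cstring>
#include <vector>

struct CodeBuffer {
    std::vector<uint8_t> bytes;
    std::vector<size_t> pending_jumps; // offsets just past each rel32 placeholder

    void emit_jo_placeholder()
    {
        bytes.insert(bytes.end(), { 0x0f, 0x80 });             // jo rel32 opcode
        bytes.insert(bytes.end(), { 0xef, 0xbe, 0xad, 0xde }); // 0xdeadbeef, little-endian
        pending_jumps.push_back(bytes.size());                 // remember the patch point
    }

    void link_label_here()
    {
        size_t target = bytes.size();
        for (size_t jump_end : pending_jumps) {
            // rel32 is relative to the end of the jo instruction.
            auto rel = static_cast<int32_t>(target - jump_end);
            std::memcpy(bytes.data() + jump_end - sizeof(int32_t), &rel, sizeof(rel));
        }
        pending_jumps.clear();
    }
};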
    void sign_extend_32_to_64_bits(Reg reg)
    {
        // movsxd (reg as 64-bit), (reg as 32-bit)
@@ -551,6 +560,24 @@ struct Assembler {
        }
    }

    void add32(Operand dst, Operand src)
    {
        if (dst.type == Operand::Type::Reg && to_underlying(dst.reg) < 8 && src.type == Operand::Type::Reg && to_underlying(src.reg) < 8) {
            emit8(0x01);
            emit8(0xc0 | (encode_reg(src.reg) << 3) | encode_reg(dst.reg));
        } else if (dst.type == Operand::Type::Reg && to_underlying(dst.reg) < 8 && src.type == Operand::Type::Imm && src.fits_in_i8()) {
            emit8(0x83);
            emit8(0xc0 | encode_reg(dst.reg));
            emit8(src.offset_or_immediate);
        } else if (dst.type == Operand::Type::Reg && to_underlying(dst.reg) < 8 && src.type == Operand::Type::Imm && src.fits_in_i32()) {
            emit8(0x81);
            emit8(0xc0 | encode_reg(dst.reg));
            emit32(src.offset_or_immediate);
        } else {
            VERIFY_NOT_REACHED();
        }
    }
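
For reference on the encodings used above: 0x01 is ADD r/m32, r32 with a mod=11 ModRM byte (register-to-register), while 0x83 /0 and 0x81 /0 are the sign-extended imm8 and imm32 forms of ADD. The to_underlying(...) < 8 checks keep the operands to the low eight GPRs, which need no REX prefix for 32-bit operations. A tiny worked example of the ModRM arithmetic (standalone, with an assumed encode_reg mapping of RAX..RDI to 0..7):

// Recompute the ModRM byte add32() would emit for "add eax, ecx".
#include <cstdint>
#include <cstdio>

static uint8_t encode_low_reg(uint8_t hw_index) { return hw_index & 0x7; }

int main()
{
    uint8_t dst = 0; // eax
    uint8_t src = 1; // ecx
    uint8_t modrm = 0xc0 | (encode_low_reg(src) << 3) | encode_low_reg(dst);
    std::printf("01 %02x\n", modrm); // prints "01 c8", i.e. add eax, ecx
    return 0;
}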
    void sub(Operand dst, Operand src)
    {
        if (dst.type == Operand::Type::Reg && src.type == Operand::Type::Reg) {


@@ -503,6 +503,49 @@ static ThrowCompletionOr<Value> typed_equals(VM&, Value src1, Value src2)
JS_ENUMERATE_COMMON_BINARY_OPS_WITHOUT_FAST_PATH(DO_COMPILE_COMMON_BINARY_OP)
# undef DO_COMPILE_COMMON_BINARY_OP

static Value cxx_add(VM& vm, Value lhs, Value rhs)
{
    return TRY_OR_SET_EXCEPTION(add(vm, lhs, rhs));
}

void Compiler::compile_add(Bytecode::Op::Add const& op)
{
    load_vm_register(ARG1, op.lhs());
    load_vm_register(ARG2, Bytecode::Register::accumulator());

    Assembler::Label end {};
    Assembler::Label slow_case {};

    branch_if_both_int32(ARG1, ARG2, [&] {
        // GPR0 = ARG1 + ARG2 (32-bit)
        m_assembler.mov(
            Assembler::Operand::Register(GPR0),
            Assembler::Operand::Register(ARG1));
        m_assembler.add32(
            Assembler::Operand::Register(GPR0),
            Assembler::Operand::Register(ARG2));

        // if (overflow) goto slow_case;
        m_assembler.jump_if_overflow(slow_case);

        // accumulator = GPR0 | SHIFTED_INT32_TAG;
        m_assembler.mov(
            Assembler::Operand::Register(GPR1),
            Assembler::Operand::Imm(SHIFTED_INT32_TAG));
        m_assembler.bitwise_or(
            Assembler::Operand::Register(GPR0),
            Assembler::Operand::Register(GPR1));
        store_vm_register(Bytecode::Register::accumulator(), GPR0);
        m_assembler.jump(end);
    });

    slow_case.link(m_assembler);
    native_call((void*)cxx_add);
    store_vm_register(Bytecode::Register::accumulator(), RET);
    check_exception();

    end.link(m_assembler);
}
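
Two details make the re-tagging at the end of the fast path work: on x86-64, the 32-bit add32 zero-extends its result into the full 64-bit GPR0, wiping the operand's old tag bits, and SHIFTED_INT32_TAG is the Int32 tag already shifted into the upper bits of LibJS's NaN-boxed Value, so a single bitwise OR rebuilds a valid boxed value. A rough sketch of that boxing step, with a placeholder tag constant standing in for the real SHIFTED_INT32_TAG:

// Illustrative NaN-box of a 32-bit result; the tag value below is a placeholder,
// not LibJS's actual INT32_TAG.
#include <cstdint>

constexpr uint64_t EXAMPLE_SHIFTED_INT32_TAG = 0x7FFEULL << 48; // assumed layout: 16-bit tag in the top bits

uint64_t box_int32(int32_t value)
{
    // Zero-extend the payload into the low 32 bits, then OR in the shifted tag,
    // mirroring "accumulator = GPR0 | SHIFTED_INT32_TAG" above.
    return static_cast<uint32_t>(value) | EXAMPLE_SHIFTED_INT32_TAG;
}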
static Value cxx_less_than(VM& vm, Value lhs, Value rhs)
{
    return TRY_OR_SET_EXCEPTION(less_than(vm, lhs, rhs));
@@ -1257,6 +1300,9 @@ OwnPtr<NativeExecutable> Compiler::compile(Bytecode::Executable& bytecode_execut
        case Bytecode::Instruction::Type::SetVariable:
            compiler.compile_set_variable(static_cast<Bytecode::Op::SetVariable const&>(op));
            break;
        case Bytecode::Instruction::Type::Add:
            compiler.compile_add(static_cast<Bytecode::Op::Add const&>(op));
            break;
        case Bytecode::Instruction::Type::LessThan:
            compiler.compile_less_than(static_cast<Bytecode::Op::LessThan const&>(op));
            break;


@@ -57,7 +57,6 @@ private:
    void compile_resolve_this_binding(Bytecode::Op::ResolveThisBinding const&);

# define JS_ENUMERATE_COMMON_BINARY_OPS_WITHOUT_FAST_PATH(O) \
    O(Add, add)                                              \
    O(Sub, sub)                                              \
    O(Mul, mul)                                              \
    O(Div, div)                                              \
@@ -91,6 +90,7 @@ private:
    JS_ENUMERATE_COMMON_UNARY_OPS(DO_COMPILE_COMMON_UNARY_OP)
# undef DO_COMPILE_COMMON_UNARY_OP

    void compile_add(Bytecode::Op::Add const&);
    void compile_less_than(Bytecode::Op::LessThan const&);
    void compile_return(Bytecode::Op::Return const&);