code = compiler::CodeAssembler::GenerateCode(&state);
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  dispatch_table_[index] = code->entry();
  TraceCodegen(code);
  PROFILE(isolate_,
          CodeCreateEvent(CodeEventListener::BYTECODE_HANDLER_TAG,
                          AbstractCode::cast(*code),
                          Bytecodes::ToString(bytecode, operand_scale).c_str()));
}

Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                      OperandScale operand_scale) {
  DCHECK(IsDispatchTableInitialized());
  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  Address code_entry = dispatch_table_[index];
  return Code::GetCodeFromTargetAddress(code_entry);
}

// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
  size_t index = static_cast<size_t>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return index;
    case OperandScale::kDouble:
      return index + kEntriesPerOperandScale;
    case OperandScale::kQuadruple:
      return index + 2 * kEntriesPerOperandScale;
  }
  UNREACHABLE();
  return 0;
}

void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
  for (int i = 0; i < kDispatchTableSize; i++) {
    Address code_entry = dispatch_table_[i];
    Object* code = code_entry == nullptr
                       ? nullptr
                       : Code::GetCodeFromTargetAddress(code_entry);
    Object* old_code = code;
    v->VisitPointer(&code);
    if (code != old_code) {
      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
    }
  }
}

// static
int Interpreter::InterruptBudget() {
  return FLAG_interrupt_budget * kCodeSizeMultiplier;
}

namespace {

bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
  if (!FLAG_print_bytecode) return false;

  // Checks whether the function passed the filter.
  if (shared->is_toplevel()) {
    Vector<const char> filter = CStrVector(FLAG_print_bytecode_filter);
    return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
  } else {
    return shared->PassesFilter(FLAG_print_bytecode_filter);
  }
}

}  // namespace

InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
    : CompilationJob(info->isolate(), info, "Ignition"),
      generator_(info),
      runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
      background_execute_counter_("CompileBackgroundIgnition"),
      print_bytecode_(ShouldPrintBytecode(info->shared_info())) {}

InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
  CodeGenerator::MakeCodePrologue(info(), "interpreter");

  if (print_bytecode_) {
    OFStream os(stdout);
    std::unique_ptr<char[]> name = info()->GetDebugName();
    os << "[generating bytecode for function: " << info()->GetDebugName().get()
       << "]" << std::endl
       << std::flush;
  }

  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
  TimerScope runtimeTimer =
      executed_on_background_thread()
          ? TimerScope(&background_execute_counter_)
          : TimerScope(runtime_call_stats_, &RuntimeCallStats::CompileIgnition);
  // TODO(lpy): add support for background compilation RCS trace.
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");

  generator()->GenerateBytecode(stack_limit());

  if (generator()->HasStackOverflow()) {
    return FAILED;
  }
  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
  // Add background runtime call stats.
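  // (Background-thread samples cannot be recorded directly into the main
  // RuntimeCallStats table, so they were accumulated in
  // background_execute_counter_ and are merged here on the main thread.)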
  if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
    runtime_call_stats_->CompileBackgroundIgnition.Add(
        &background_execute_counter_);
  }
  RuntimeCallTimerScope runtimeTimer(
      runtime_call_stats_, &RuntimeCallStats::CompileIgnitionFinalization);

  Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
  if (generator()->HasStackOverflow()) {
    return FAILED;
  }

  if (print_bytecode_) {
    OFStream os(stdout);
    bytecodes->Print(os);
    os << std::flush;
  }

  info()->SetBytecodeArray(bytecodes);
  info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
  return SUCCEEDED;
}

CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
  return new InterpreterCompilationJob(info);
}

bool Interpreter::IsDispatchTableInitialized() {
  return dispatch_table_[0] != nullptr;
}

bool Interpreter::ShouldInitializeDispatchTable() {
  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
      FLAG_trace_ignition_dispatches) {
    // Regenerate table to add bytecode tracing operations, print the assembly
    // code generated by TurboFan or instrument handlers with dispatch counters.
    return true;
  }
  return !IsDispatchTableInitialized();
}

void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_trace_ignition_codegen) {
    OFStream os(stdout);
    code->Disassemble(nullptr, os);
    os << std::flush;
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...)                                 \
  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
      code->entry()) {                                         \
    return #Name;                                              \
  }
  BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
#endif  // ENABLE_DISASSEMBLER
  return nullptr;
}

uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
  int from_index = Bytecodes::ToByte(from);
  int to_index = Bytecodes::ToByte(to);
  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
                                           to_index];
}

Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();

  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys of the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of these inner objects are
  // the destinations of the dispatch, and each associated value is a counter
  // for that source-destination dispatch pair.
  //
  // Only non-zero counters are written, but an entry in the top-level object
  // is always present, even if it is empty because all counters for that
  // source are zero.
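  //
  // For example (with made-up counts), the resulting object might look like:
  //   { "LdaZero": { "Star": 3, "Return": 1 }, "Return": {} }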
  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(
        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
            .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* smi_int = __ BytecodeOperandImmSmi(0);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}

// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
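// (For example, "Mov r0, r1" copies the value held in register r0 into
// register r1 without touching the accumulator.)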
void Interpreter::DoMov(InterpreterAssembler* assembler) { Node* src_index = __ BytecodeOperandReg(0); Node* src_value = __ LoadRegister(src_index); Node* dst_index = __ BytecodeOperandReg(1); __ StoreRegister(src_value, dst_index); __ Dispatch(); } void Interpreter::BuildLoadGlobal(int slot_operand_index, int name_operand_index, TypeofMode typeof_mode, InterpreterAssembler* assembler) { // Load the global via the LoadGlobalIC. Node* feedback_vector = __ LoadFeedbackVector(); Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index); AccessorAssembler accessor_asm(assembler->state()); Label try_handler(assembler, Label::kDeferred), miss(assembler, Label::kDeferred); // Fast path without frame construction for the data case. { Label done(assembler); Variable var_result(assembler, MachineRepresentation::kTagged); ExitPoint exit_point(assembler, &done, &var_result); accessor_asm.LoadGlobalIC_TryPropertyCellCase( feedback_vector, feedback_slot, &exit_point, &try_handler, &miss, CodeStubAssembler::INTPTR_PARAMETERS); __ Bind(&done); __ SetAccumulator(var_result.value()); __ Dispatch(); } // Slow path with frame construction. { Label done(assembler); Variable var_result(assembler, MachineRepresentation::kTagged); ExitPoint exit_point(assembler, &done, &var_result); __ Bind(&try_handler); { Node* context = __ GetContext(); Node* smi_slot = __ SmiTag(feedback_slot); Node* name_index = __ BytecodeOperandIdx(name_operand_index); Node* name = __ LoadConstantPoolEntry(name_index); AccessorAssembler::LoadICParameters params(context, nullptr, name, smi_slot, feedback_vector); accessor_asm.LoadGlobalIC_TryHandlerCase(¶ms, typeof_mode, &exit_point, &miss); } __ Bind(&miss); { Node* context = __ GetContext(); Node* smi_slot = __ SmiTag(feedback_slot); Node* name_index = __ BytecodeOperandIdx(name_operand_index); Node* name = __ LoadConstantPoolEntry(name_index); AccessorAssembler::LoadICParameters params(context, nullptr, name, smi_slot, feedback_vector); accessor_asm.LoadGlobalIC_MissCase(¶ms, &exit_point); } __ Bind(&done); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } } // LdaGlobal // // Load the global with name in constant pool entry into the // accumulator using FeedBackVector slot outside of a typeof. void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF, assembler); } // LdaGlobalInsideTypeof // // Load the global with name in constant pool entry into the // accumulator using FeedBackVector slot inside of a typeof. void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF, assembler); } void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) { // Get the global object. Node* context = __ GetContext(); Node* native_context = __ LoadNativeContext(context); Node* global = __ LoadContextElement(native_context, Context::EXTENSION_INDEX); // Store the global via the StoreIC. 
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedBackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
  // TODO(danno) Share the actual code object rather than creating a duplicate
  // one.
  DoLdaContextSlot(assembler);
}

// LdaCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaImmutableCurrentContextSlot(
    InterpreterAssembler* assembler) {
  // TODO(danno) Share the actual code object rather than creating a duplicate
  // one.
  DoLdaCurrentContextSlot(assembler);
}

// StaContextSlot <context> <slot_index> <depth>
//
// Stores the object in the accumulator into |slot_index| of the context at
// |depth| in the context chain starting at |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

// StaCurrentContextSlot <slot_index>
//
// Stores the object in the accumulator into |slot_index| of the current
// context.
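// (This is the depth-0 specialization of StaContextSlot: the handler below
// writes straight into the current context and skips the context walk.)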
void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* slot_index = __ BytecodeOperandIdx(0); Node* slot_context = __ GetContext(); __ StoreContextElement(slot_context, slot_index, value); __ Dispatch(); } void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* name_index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(name_index); Node* context = __ GetContext(); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } // LdaLookupSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) { DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) { DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* context = __ GetContext(); Node* name_index = __ BytecodeOperandIdx(0); Node* slot_index = __ BytecodeOperandIdx(1); Node* depth = __ BytecodeOperandUImm(2); Label slowpath(assembler, Label::kDeferred); // Check for context extensions to allow the fast path. __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); // Fast path does a normal load context. { Node* slot_context = __ GetContextAtDepth(context, depth); Node* result = __ LoadContextElement(slot_context, slot_index); __ SetAccumulator(result); __ Dispatch(); } // Slow path when we have to call out to the runtime. __ Bind(&slowpath); { Node* name = __ LoadConstantPoolEntry(name_index); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } } // LdaLookupSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) { DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. void Interpreter::DoLdaLookupContextSlotInsideTypeof( InterpreterAssembler* assembler) { DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* context = __ GetContext(); Node* depth = __ BytecodeOperandUImm(2); Label slowpath(assembler, Label::kDeferred); // Check for context extensions to allow the fast path __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); // Fast path does a normal load global { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof ? 
INSIDE_TYPEOF : NOT_INSIDE_TYPEOF; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode, assembler); } // Slow path when we have to call out to the runtime __ Bind(&slowpath); { Node* name_index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(name_index); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } } // LdaLookupGlobalSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) { DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupGlobalSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. void Interpreter::DoLdaLookupGlobalSlotInsideTypeof( InterpreterAssembler* assembler) { DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoStaLookupSlot(LanguageMode language_mode, InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(index); Node* context = __ GetContext(); Node* result = __ CallRuntime(is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict : Runtime::kStoreLookupSlot_Sloppy, context, name, value); __ SetAccumulator(result); __ Dispatch(); } // StaLookupSlotSloppy // // Store the object in accumulator to the object with the name in constant // pool entry |name_index| in sloppy mode. void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) { DoStaLookupSlot(LanguageMode::SLOPPY, assembler); } // StaLookupSlotStrict // // Store the object in accumulator to the object with the name in constant // pool entry |name_index| in strict mode. void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) { DoStaLookupSlot(LanguageMode::STRICT, assembler); } // LdaNamedProperty // // Calls the LoadIC at FeedBackVector slot for and the name at // constant pool entry . void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_); Node* code_target = __ HeapConstant(ic.code()); Node* register_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(register_index); Node* constant_index = __ BytecodeOperandIdx(1); Node* name = __ LoadConstantPoolEntry(constant_index); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallStub(ic.descriptor(), code_target, context, object, name, smi_slot, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } // KeyedLoadIC // // Calls the KeyedLoadIC at FeedBackVector slot for and the key // in the accumulator. 
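// (This is the handler behind keyed accesses such as obj[key]: the receiver
// comes from a register operand, while the key is taken from the accumulator.)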
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_); Node* code_target = __ HeapConstant(ic.code()); Node* reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(reg_index); Node* name = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(1); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallStub(ic.descriptor(), code_target, context, object, name, smi_slot, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) { Node* code_target = __ HeapConstant(ic.code()); Node* object_reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(object_reg_index); Node* constant_index = __ BytecodeOperandIdx(1); Node* name = __ LoadConstantPoolEntry(constant_index); Node* value = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot, feedback_vector); __ Dispatch(); } // StaNamedPropertySloppy // // Calls the sloppy mode StoreIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY); DoStoreIC(ic, assembler); } // StaNamedPropertyStrict // // Calls the strict mode StoreIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT); DoStoreIC(ic, assembler); } // StaNamedOwnProperty // // Calls the StoreOwnIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_); DoStoreIC(ic, assembler); } void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) { Node* code_target = __ HeapConstant(ic.code()); Node* object_reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(object_reg_index); Node* name_reg_index = __ BytecodeOperandReg(1); Node* name = __ LoadRegister(name_reg_index); Node* value = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot, feedback_vector); __ Dispatch(); } // StaKeyedPropertySloppy // // Calls the sloppy mode KeyStoreIC at FeedBackVector slot for // and the key with the value in the accumulator. void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY); DoKeyedStoreIC(ic, assembler); } // StaKeyedPropertyStrict // // Calls the strict mode KeyStoreIC at FeedBackVector slot for // and the key with the value in the accumulator. 
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT); DoKeyedStoreIC(ic, assembler); } // StaDataPropertyInLiteral // // Define a property with value from the accumulator in . // Property attributes and whether set_function_name are stored in // DataPropertyInLiteralFlags . // // This definition is not observable and is used only for definitions // in object or class literals. void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) { Node* object = __ LoadRegister(__ BytecodeOperandReg(0)); Node* name = __ LoadRegister(__ BytecodeOperandReg(1)); Node* value = __ GetAccumulator(); Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2)); Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3)); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name, value, flags, feedback_vector, vector_index); __ Dispatch(); } // LdaModuleVariable // // Load the contents of a module variable into the accumulator. The variable is // identified by . is the depth of the current context // relative to the module context. void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) { Node* cell_index = __ BytecodeOperandImmIntPtr(0); Node* depth = __ BytecodeOperandUImm(1); Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); Node* module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); Label if_export(assembler), if_import(assembler), end(assembler); __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, &if_import); __ Bind(&if_export); { Node* regular_exports = __ LoadObjectField(module, Module::kRegularExportsOffset); // The actual array index is (cell_index - 1). Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1)); Node* cell = __ LoadFixedArrayElement(regular_exports, export_index); __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); __ Goto(&end); } __ Bind(&if_import); { Node* regular_imports = __ LoadObjectField(module, Module::kRegularImportsOffset); // The actual array index is (-cell_index - 1). Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index); Node* cell = __ LoadFixedArrayElement(regular_imports, import_index); __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); __ Goto(&end); } __ Bind(&end); __ Dispatch(); } // StaModuleVariable // // Store accumulator to the module variable identified by . // is the depth of the current context relative to the module context. void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* cell_index = __ BytecodeOperandImmIntPtr(0); Node* depth = __ BytecodeOperandUImm(1); Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); Node* module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); Label if_export(assembler), if_import(assembler), end(assembler); __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, &if_import); __ Bind(&if_export); { Node* regular_exports = __ LoadObjectField(module, Module::kRegularExportsOffset); // The actual array index is (cell_index - 1). 
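    // (Cell indices are 1-based for exports and negative for imports, which
    // is why export cell n lives at array offset n - 1.)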
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ StoreObjectField(cell, Cell::kValueOffset, value);
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    // Not supported (probably never).
    __ Abort(kUnsupportedModuleOperation);
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

// TODO(mythria): Remove this function once all CompareOps record type
// feedback.
void Interpreter::DoCompareOp(Token::Value compare_op,
                              InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result;
  switch (compare_op) {
    case Token::IN:
      result = assembler->HasProperty(rhs, lhs, context);
      break;
    case Token::INSTANCEOF:
      result = assembler->InstanceOf(lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
                                     feedback_vector, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
                                          InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();

  // TODO(interpreter): the only reason this check is here is because we
  // sometimes emit comparisons that shouldn't collect feedback (e.g.
  // try-finally blocks and generators), and we could get rid of this by
  // introducing Smi equality tests.
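  // A slot index of 0 therefore acts as the "no feedback" sentinel below:
  // the comparison is still performed, but no type feedback is recorded.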
Label gather_type_feedback(assembler), do_compare(assembler); __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare, &gather_type_feedback); __ Bind(&gather_type_feedback); { Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler), lhs_is_not_string(assembler), gather_rhs_type(assembler), update_feedback(assembler); __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi); var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kSignedSmall)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_smi); { Node* lhs_map = __ LoadMap(lhs); __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number); var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_number); { Node* lhs_instance_type = __ LoadInstanceType(lhs); if (Token::IsOrderedRelationalCompareOp(compare_op)) { Label lhs_is_not_oddball(assembler); __ GotoIfNot( __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)), &lhs_is_not_oddball); var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_oddball); } Label lhs_is_not_string(assembler); __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type), &lhs_is_not_string); if (Token::IsOrderedRelationalCompareOp(compare_op)) { var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kString)); } else { var_type_feedback.Bind(__ SelectSmiConstant( __ Word32Equal( __ Word32And(lhs_instance_type, __ Int32Constant(kIsNotInternalizedMask)), __ Int32Constant(kInternalizedTag)), CompareOperationFeedback::kInternalizedString, CompareOperationFeedback::kString)); } __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_string); if (Token::IsEqualityOp(compare_op)) { var_type_feedback.Bind(__ SelectSmiConstant( __ IsJSReceiverInstanceType(lhs_instance_type), CompareOperationFeedback::kReceiver, CompareOperationFeedback::kAny)); } else { var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kAny)); } __ Goto(&gather_rhs_type); } } __ Bind(&gather_rhs_type); { Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler); __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi); var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kSignedSmall))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_smi); { Node* rhs_map = __ LoadMap(rhs); __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number); var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kNumber))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_number); { Node* rhs_instance_type = __ LoadInstanceType(rhs); if (Token::IsOrderedRelationalCompareOp(compare_op)) { Label rhs_is_not_oddball(assembler); __ GotoIfNot(__ Word32Equal(rhs_instance_type, __ Int32Constant(ODDBALL_TYPE)), &rhs_is_not_oddball); var_type_feedback.Bind(__ SmiOr( var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kNumberOrOddball))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_oddball); } Label rhs_is_not_string(assembler); __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type), &rhs_is_not_string); if (Token::IsOrderedRelationalCompareOp(compare_op)) { var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kString))); } else { var_type_feedback.Bind(__ SmiOr( var_type_feedback.value(), __ SelectSmiConstant( __ Word32Equal( __ Word32And(rhs_instance_type, __ 
                                  Int32Constant(kIsNotInternalizedMask)),
                              __ Int32Constant(kInternalizedTag)),
                          CompareOperationFeedback::kInternalizedString,
                          CompareOperationFeedback::kString)));
        }
        __ Goto(&update_feedback);

        __ Bind(&rhs_is_not_string);
        if (Token::IsEqualityOp(compare_op)) {
          var_type_feedback.Bind(
              __ SmiOr(var_type_feedback.value(),
                       __ SelectSmiConstant(
                           __ IsJSReceiverInstanceType(rhs_instance_type),
                           CompareOperationFeedback::kReceiver,
                           CompareOperationFeedback::kAny)));
        } else {
          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kAny));
        }
        __ Goto(&update_feedback);
      }
    }
  }

  __ Bind(&update_feedback);
  {
    __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
    __ Goto(&do_compare);
  }

  __ Bind(&do_compare);
  Node* result;
  switch (compare_op) {
    case Token::EQ:
      result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
                                context);
      break;
    case Token::NE:
      result =
          assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
      break;
    case Token::EQ_STRICT:
      result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
                                      rhs, context);
      break;
    case Token::LT:
      result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
                                               lhs, rhs, context);
      break;
    case Token::GT:
      result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
                                               lhs, rhs, context);
      break;
    case Token::LTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
      break;
    case Token::GTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
}

void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
                                    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();

  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned),
      var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, lhs, &var_lhs_type_feedback);
  Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, rhs, &var_rhs_type_feedback);
  Node* result = nullptr;

  switch (bitwise_op) {
    case Token::BIT_OR: {
      Node* value = __ Word32Or(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::BIT_AND: {
      Node* value = __ Word32And(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::BIT_XOR: {
      Node* value = __ Word32Xor(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::SHL: {
      Node* value = __ Word32Shl(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::SHR: {
      Node* value = __ Word32Shr(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeUint32ToTagged(value);
    } break;
    case Token::SAR: {
      Node* value = __ Word32Sar(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeInt32ToTagged(value);
    } break;
    default:
      UNREACHABLE();
  }

  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);

  if (FLAG_debug_code) {
    Label ok(assembler);
    __ GotoIf(__ TaggedIsSmi(result), &ok);
    Node* result_map = __ LoadMap(result);
    __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
                           kExpectedHeapNumber);
    __ Goto(&ok);
    __ Bind(&ok);
  }

  Node* input_feedback =
      __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
  __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector,
                    slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_OR, assembler);
}

// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
}

// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_AND, assembler);
}

// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
// Register <src> is converted to an int32 and the accumulator to uint32
// before the operation. The 5 lsb bits from the accumulator are used as
// count, i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::SHL, assembler);
}

// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is sign extended. Register <src> is converted to an int32 and the
// accumulator to uint32 before the operation. The 5 lsb bits from the
// accumulator are used as count, i.e. <src> >> (accumulator & 0x1F).
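// (Worked example: with -8 in <src> and 1 in the accumulator, the arithmetic
// shift produces -4, since the sign bit is copied into the vacated bits.)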
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::SAR, assembler);
}

// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// Result is zero-filled. The accumulator and register <src> are converted to
// uint32 before the operation. The 5 lsb bits from the accumulator are used
// as count, i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::SHR, assembler);
}

// AddSmi <imm> <reg> <slot>
//
// Adds an immediate value <imm> to register <reg>. For this operation <reg>
// is the lhs operand and <imm> is the rhs operand.
void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
  Variable var_result(assembler, MachineRepresentation::kTagged);
  Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
      end(assembler);

  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();

  // {right} is known to be a Smi.
  // Check if {left} is a Smi and, if so, take the fast path.
  __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
  __ Bind(&fastpath);
  {
    // Try fast Smi addition first.
    Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
                                          __ BitcastTaggedToWord(right));
    Node* overflow = __ Projection(1, pair);

    // Check if the Smi addition overflowed.
    Label if_notoverflow(assembler);
    __ Branch(overflow, &slowpath, &if_notoverflow);
    __ Bind(&if_notoverflow);
    {
      __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
                        feedback_vector, slot_index);
      var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
      __ Goto(&end);
    }
  }
  __ Bind(&slowpath);
  {
    Node* context = __ GetContext();
    AddWithFeedbackStub stub(__ isolate());
    Callable callable =
        Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
    var_result.Bind(__ CallStub(callable, context, left, right,
                                __ TruncateWordToWord32(slot_index),
                                feedback_vector));
    __ Goto(&end);
  }
  __ Bind(&end);
  {
    __ SetAccumulator(var_result.value());
    __ Dispatch();
  }
}

// SubSmi <imm> <reg> <slot>
//
// Subtracts an immediate value <imm> from the value in register <reg>. For
// this operation <reg> is the lhs operand and <imm> is the rhs operand.
void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
  Variable var_result(assembler, MachineRepresentation::kTagged);
  Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
      end(assembler);

  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();

  // {right} is known to be a Smi.
  // Check if {left} is a Smi and, if so, take the fast path.
  __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
  __ Bind(&fastpath);
  {
    // Try fast Smi subtraction first.
    Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
                                          __ BitcastTaggedToWord(right));
    Node* overflow = __ Projection(1, pair);

    // Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler); __ Branch(overflow, &slowpath, &if_notoverflow); __ Bind(&if_notoverflow); { __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), feedback_vector, slot_index); var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); __ Goto(&end); } } __ Bind(&slowpath); { Node* context = __ GetContext(); SubtractWithFeedbackStub stub(__ isolate()); Callable callable = Callable( stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate())); var_result.Bind(__ CallStub(callable, context, left, right, __ TruncateWordToWord32(slot_index), feedback_vector)); __ Goto(&end); } __ Bind(&end); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } // BitwiseOr // // BitwiseOr with . For this operation is the lhs // operand and is the rhs operand. void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32Or(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // BitwiseAnd // // BitwiseAnd with . For this operation is the lhs // operand and is the rhs operand. void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32And(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // ShiftLeftSmi // // Left shifts register by the count specified in . // Register is converted to an int32 before the operation. The 5 // lsb bits from are used as count i.e. << ( & 0x1F). 
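// (Worked example: an immediate shift count of 33 is masked to 33 & 0x1F == 1,
// so the register value is shifted left by a single bit.)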
void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();

  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, left, &var_lhs_type_feedback);
  Node* rhs_value = __ SmiToWord32(right);
  Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
  Node* value = __ Word32Shl(lhs_value, shift_count);
  Node* result = __ ChangeInt32ToTagged(value);
  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);
  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
                    feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ShiftRightSmi <imm> <reg>
//
// Right shifts register <reg> by the count specified in <imm>.
// Register <reg> is converted to an int32 before the operation. The 5
// lsb bits from <imm> are used as count, i.e. <reg> >> (<imm> & 0x1F).
void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();

  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, left, &var_lhs_type_feedback);
  Node* rhs_value = __ SmiToWord32(right);
  Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
  Node* value = __ Word32Sar(lhs_value, shift_count);
  Node* result = __ ChangeInt32ToTagged(value);
  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);
  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
                    feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

Node* Interpreter::BuildUnaryOp(Callable callable,
                                InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  return __ CallStub(callable.descriptor(), target, context, accumulator);
}

template <class Generator>
void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* result = Generator::Generate(assembler, value, context,
                                     feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ToName
//
// Convert the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ ToName(context, object);
  __ StoreRegister(result, __ BytecodeOperandReg(0));
  __ Dispatch();
}

// ToNumber
//
// Convert the object referenced by the accumulator to a number.
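// (Per the JS ToNumber conversion, e.g. "3" becomes 3, true becomes 1, and
// undefined becomes NaN.)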
void Interpreter::DoToNumber(InterpreterAssembler* assembler) { Node* object = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ ToNumber(context, object); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // ToObject // // Convert the object referenced by the accumulator to a JSReceiver. void Interpreter::DoToObject(InterpreterAssembler* assembler) { Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // Inc // // Increments value in the accumulator by one. void Interpreter::DoInc(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point increment. Label do_finc(assembler), end(assembler); Variable var_finc_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); value_var.Bind(value); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi addition first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrAddWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi addition overflowed. Label if_overflow(assembler), if_notoverflow(assembler); assembler->Branch(overflow, &if_overflow, &if_notoverflow); assembler->Bind(&if_notoverflow); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); result_var.Bind( assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); assembler->Goto(&end); assembler->Bind(&if_overflow); { var_finc_value.Bind(assembler->SmiToFloat64(value)); assembler->Goto(&do_finc); } } assembler->Bind(&if_isnotsmi); { // Check if the value is a HeapNumber. Label if_valueisnumber(assembler), if_valuenotnumber(assembler, Label::kDeferred); Node* value_map = assembler->LoadMap(value); assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); assembler->Bind(&if_valueisnumber); { // Load the HeapNumber value. var_finc_value.Bind(assembler->LoadHeapNumberValue(value)); assembler->Goto(&do_finc); } assembler->Bind(&if_valuenotnumber); { // We do not require an Or with earlier feedback here because once we // convert the value to a number, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. 
CSA_ASSERT(assembler, assembler->SmiEqual( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNone))); Label if_valueisoddball(assembler), if_valuenotoddball(assembler); Node* instance_type = assembler->LoadMapInstanceType(value_map); Node* is_oddball = assembler->Word32Equal( instance_type, assembler->Int32Constant(ODDBALL_TYPE)); assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); assembler->Bind(&if_valueisoddball); { // Convert Oddball to Number and check again. value_var.Bind( assembler->LoadObjectField(value, Oddball::kToNumberOffset)); var_type_feedback.Bind(assembler->SmiConstant( BinaryOperationFeedback::kNumberOrOddball)); assembler->Goto(&start); } assembler->Bind(&if_valuenotoddball); { // Convert to a Number first and try again. Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate()); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kAny)); value_var.Bind(assembler->CallStub(callable, context, value)); assembler->Goto(&start); } } } } assembler->Bind(&do_finc); { Node* finc_value = var_finc_value.value(); Node* one = assembler->Float64Constant(1.0); Node* finc_result = assembler->Float64Add(finc_value, one); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNumber))); result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result)); assembler->Goto(&end); } assembler->Bind(&end); assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ SetAccumulator(result_var.value()); __ Dispatch(); } // Dec // // Decrements value in the accumulator by one. void Interpreter::DoDec(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point decrement. Label do_fdec(assembler), end(assembler); Variable var_fdec_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); value_var.Bind(value); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi subtraction first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrSubWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi subtraction overflowed. 
Label if_overflow(assembler), if_notoverflow(assembler); assembler->Branch(overflow, &if_overflow, &if_notoverflow); assembler->Bind(&if_notoverflow); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); result_var.Bind( assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); assembler->Goto(&end); assembler->Bind(&if_overflow); { var_fdec_value.Bind(assembler->SmiToFloat64(value)); assembler->Goto(&do_fdec); } } assembler->Bind(&if_isnotsmi); { // Check if the value is a HeapNumber. Label if_valueisnumber(assembler), if_valuenotnumber(assembler, Label::kDeferred); Node* value_map = assembler->LoadMap(value); assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); assembler->Bind(&if_valueisnumber); { // Load the HeapNumber value. var_fdec_value.Bind(assembler->LoadHeapNumberValue(value)); assembler->Goto(&do_fdec); } assembler->Bind(&if_valuenotnumber); { // We do not require an Or with earlier feedback here because once we // convert the value to a number, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. CSA_ASSERT(assembler, assembler->SmiEqual( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNone))); Label if_valueisoddball(assembler), if_valuenotoddball(assembler); Node* instance_type = assembler->LoadMapInstanceType(value_map); Node* is_oddball = assembler->Word32Equal( instance_type, assembler->Int32Constant(ODDBALL_TYPE)); assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); assembler->Bind(&if_valueisoddball); { // Convert Oddball to Number and check again. value_var.Bind( assembler->LoadObjectField(value, Oddball::kToNumberOffset)); var_type_feedback.Bind(assembler->SmiConstant( BinaryOperationFeedback::kNumberOrOddball)); assembler->Goto(&start); } assembler->Bind(&if_valuenotoddball); { // Convert to a Number first and try again. Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate()); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kAny)); value_var.Bind(assembler->CallStub(callable, context, value)); assembler->Goto(&start); } } } } assembler->Bind(&do_fdec); { Node* fdec_value = var_fdec_value.value(); Node* one = assembler->Float64Constant(1.0); Node* fdec_result = assembler->Float64Sub(fdec_value, one); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNumber))); result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result)); assembler->Goto(&end); } assembler->Bind(&end); assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ SetAccumulator(result_var.value()); __ Dispatch(); } // LogicalNot // // Perform logical-not on the accumulator, first casting the // accumulator to a boolean value if required. 
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Variable result(assembler, MachineRepresentation::kTagged); Label if_true(assembler), if_false(assembler), end(assembler); Node* true_value = __ BooleanConstant(true); Node* false_value = __ BooleanConstant(false); __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); __ Bind(&if_true); { result.Bind(false_value); __ Goto(&end); } __ Bind(&if_false); { result.Bind(true_value); __ Goto(&end); } __ Bind(&end); __ SetAccumulator(result.value()); __ Dispatch(); } // LogicalNot // // Perform logical-not on the accumulator, which must already be a boolean // value. void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Variable result(assembler, MachineRepresentation::kTagged); Label if_true(assembler), if_false(assembler), end(assembler); Node* true_value = __ BooleanConstant(true); Node* false_value = __ BooleanConstant(false); __ Branch(__ WordEqual(value, true_value), &if_true, &if_false); __ Bind(&if_true); { result.Bind(false_value); __ Goto(&end); } __ Bind(&if_false); { if (FLAG_debug_code) { __ AbortIfWordNotEqual(value, false_value, BailoutReason::kExpectedBooleanValue); } result.Bind(true_value); __ Goto(&end); } __ Bind(&end); __ SetAccumulator(result.value()); __ Dispatch(); } // TypeOf // // Load the accumulator with a string representing the type of the // object in the accumulator. void Interpreter::DoTypeOf(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = assembler->Typeof(value, context); __ SetAccumulator(result); __ Dispatch(); } void Interpreter::DoDelete(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(reg_index); Node* key = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ CallRuntime(function_id, context, object, key); __ SetAccumulator(result); __ Dispatch(); } // DeletePropertyStrict // // Delete the property specified in the accumulator from the object // referenced by the register operand following strict mode semantics. void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) { DoDelete(Runtime::kDeleteProperty_Strict, assembler); } // DeletePropertySloppy // // Delete the property specified in the accumulator from the object // referenced by the register operand following sloppy mode semantics. void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) { DoDelete(Runtime::kDeleteProperty_Sloppy, assembler); } // GetSuperConstructor // // Get the super constructor from the object referenced by the accumulator. // The result is stored in register |reg|.
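// `class B extends A {}` installs A as the [[Prototype]] of the function
// object B, so the super constructor of the active function is simply its
// prototype parent. Minimal model (Fn is a stand-in, not a V8 type):
#if 0
struct Fn { Fn* proto; };  // the function object's __proto__
Fn* SuperConstructorModel(Fn* active_function) {
  return active_function->proto;  // for `B extends A`, this is A
}
#endif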
void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) { Node* active_function = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ GetSuperConstructor(active_function, context); Node* reg = __ BytecodeOperandReg(0); __ StoreRegister(result, reg); __ Dispatch(); } void Interpreter::DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode) { Node* function_reg = __ BytecodeOperandReg(0); Node* function = __ LoadRegister(function_reg); Node* receiver_reg = __ BytecodeOperandReg(1); Node* receiver_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); Node* slot_id = __ BytecodeOperandIdx(3); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallJSWithFeedback(function, context, receiver_arg, args_count, slot_id, feedback_vector, tail_call_mode); __ SetAccumulator(result); __ Dispatch(); } // Call // // Call a JSfunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback // into |feedback_slot_id| void Interpreter::DoCall(InterpreterAssembler* assembler) { DoJSCall(assembler, TailCallMode::kDisallow); } // CallProperty // // Call a JSfunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback into // |feedback_slot_id|. The callable is known to be a property of the receiver. void Interpreter::DoCallProperty(InterpreterAssembler* assembler) { // TODO(leszeks): Look into making the interpreter use the fact that the // receiver is non-null. DoJSCall(assembler, TailCallMode::kDisallow); } // TailCall // // Tail call a JSfunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback // into |feedback_slot_id| void Interpreter::DoTailCall(InterpreterAssembler* assembler) { DoJSCall(assembler, TailCallMode::kAllow); } // CallRuntime // // Call the runtime function |function_id| with the first argument in // register |first_arg| and |arg_count| arguments in subsequent // registers. void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) { Node* function_id = __ BytecodeOperandRuntimeId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count); __ SetAccumulator(result); __ Dispatch(); } // InvokeIntrinsic // // Implements the semantic equivalent of calling the runtime function // |function_id| with the first argument in |first_arg| and |arg_count| // arguments in subsequent registers. void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) { Node* function_id = __ BytecodeOperandIntrinsicId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* arg_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); IntrinsicsHelper helper(assembler); Node* result = helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count); __ SetAccumulator(result); __ Dispatch(); } // CallRuntimeForPair // // Call the runtime function |function_id| which returns a pair, with the // first argument in register |first_arg| and |arg_count| arguments in // subsequent registers. 
Returns the result in <first_return> and // <first_return + 1>. void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) { // Call the runtime function. Node* function_id = __ BytecodeOperandRuntimeId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result_pair = __ CallRuntimeN(function_id, context, first_arg, args_count, 2); // Store the results in <first_return> and <first_return + 1>. Node* first_return_reg = __ BytecodeOperandReg(3); Node* second_return_reg = __ NextRegister(first_return_reg); Node* result0 = __ Projection(0, result_pair); Node* result1 = __ Projection(1, result_pair); __ StoreRegister(result0, first_return_reg); __ StoreRegister(result1, second_return_reg); __ Dispatch(); } // CallJSRuntime // // Call the JS runtime function that has the |context_index| with the receiver // in register |receiver| and |arg_count| arguments in subsequent registers. void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) { Node* context_index = __ BytecodeOperandIdx(0); Node* receiver_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); // Get the function to call from the native context. Node* context = __ GetContext(); Node* native_context = __ LoadNativeContext(context); Node* function = __ LoadContextElement(native_context, context_index); // Call the function. Node* result = __ CallJS(function, context, first_arg, args_count, TailCallMode::kDisallow); __ SetAccumulator(result); __ Dispatch(); } // CallWithSpread // // Call a JSfunction or Callable in |callable| with the receiver in // |first_arg| and |arg_count - 1| arguments in subsequent registers. The // final argument is always a spread. // void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) { Node* callable_reg = __ BytecodeOperandReg(0); Node* callable = __ LoadRegister(callable_reg); Node* receiver_reg = __ BytecodeOperandReg(1); Node* receiver_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); Node* context = __ GetContext(); // Call into Runtime function CallWithSpread which does everything. Node* result = __ CallJSWithSpread(callable, context, receiver_arg, args_count); __ SetAccumulator(result); __ Dispatch(); } // ConstructWithSpread // // Call the constructor in |constructor| with the first argument in register // |first_arg| and |arg_count| arguments in subsequent registers. The final // argument is always a spread. The new.target is in the accumulator. // void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) { Node* new_target = __ GetAccumulator(); Node* constructor_reg = __ BytecodeOperandReg(0); Node* constructor = __ LoadRegister(constructor_reg); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result = __ ConstructWithSpread(constructor, context, new_target, first_arg, args_count); __ SetAccumulator(result); __ Dispatch(); } // Construct // // Call operator construct with |constructor| and the first argument in // register |first_arg| and |arg_count| arguments in subsequent // registers.
The new.target is in the accumulator. // void Interpreter::DoConstruct(InterpreterAssembler* assembler) { Node* new_target = __ GetAccumulator(); Node* constructor_reg = __ BytecodeOperandReg(0); Node* constructor = __ LoadRegister(constructor_reg); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* slot_id = __ BytecodeOperandIdx(3); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ Construct(constructor, context, new_target, first_arg, args_count, slot_id, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } // TestEqual // // Test if the value in the register equals the accumulator. void Interpreter::DoTestEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::EQ, assembler); } // TestNotEqual // // Test if the value in the register is not equal to the accumulator. void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::NE, assembler); } // TestEqualStrict // // Test if the value in the register is strictly equal to the accumulator. void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler); } // TestLessThan // // Test if the value in the register is less than the accumulator. void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::LT, assembler); } // TestGreaterThan // // Test if the value in the register is greater than the accumulator. void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::GT, assembler); } // TestLessThanOrEqual // // Test if the value in the register is less than or equal to the // accumulator. void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::LTE, assembler); } // TestGreaterThanOrEqual // // Test if the value in the register is greater than or equal to the // accumulator. void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::GTE, assembler); }
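// All Test* handlers share one operand convention: the left-hand side comes
// from the register operand, the right-hand side is the accumulator. A tiny
// register-machine sketch of that convention (stand-in types, not V8 code):
#if 0
struct Frame { double regs[8]; double acc; };
enum class CompareOp { kLessThan, kGreaterThanOrEqual };
bool EvalCompare(const Frame& f, CompareOp op, int reg) {
  switch (op) {
    case CompareOp::kLessThan:           return f.regs[reg] < f.acc;
    case CompareOp::kGreaterThanOrEqual: return f.regs[reg] >= f.acc;
  }
  return false;
}
#endif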
Local<v8::Object> Interpreter::GetDispatchCountersObject() { v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_); Local<v8::Context> context = isolate->GetCurrentContext(); Local<v8::Object> counters_map = v8::Object::New(isolate); // Output is a JSON-encoded object of objects. // // The keys on the top level object are source bytecodes, // and the corresponding values are objects. Keys on these last are the // destinations of the dispatch and the value associated is a counter for // the corresponding source-destination dispatch chain. // // Only non-zero counters are written to file, but an entry in the top-level // object is always present, even if the value is empty because all counters // for that source are zero. for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) { Bytecode from_bytecode = Bytecodes::FromByte(from_index); Local<v8::Object> counters_row = v8::Object::New(isolate); for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) { Bytecode to_bytecode = Bytecodes::FromByte(to_index); uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode); if (counter > 0) { std::string to_name = Bytecodes::ToString(to_bytecode); Local<v8::String> to_name_object = v8::String::NewFromUtf8(isolate, to_name.c_str(), NewStringType::kNormal) .ToLocalChecked(); Local<v8::Number> counter_object = v8::Number::New(isolate, counter); CHECK(counters_row ->DefineOwnProperty(context, to_name_object, counter_object) .IsJust()); } } std::string from_name = Bytecodes::ToString(from_bytecode); Local<v8::String> from_name_object = v8::String::NewFromUtf8(isolate, from_name.c_str(), NewStringType::kNormal) .ToLocalChecked(); CHECK( counters_map->DefineOwnProperty(context, from_name_object, counters_row) .IsJust()); } return counters_map; } // LdaZero // // Load literal '0' into the accumulator. void Interpreter::DoLdaZero(InterpreterAssembler* assembler) { Node* zero_value = __ NumberConstant(0.0); __ SetAccumulator(zero_value); __ Dispatch(); } // LdaSmi <imm> // // Load an integer literal into the accumulator as a Smi. void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) { Node* smi_int = __ BytecodeOperandImmSmi(0); __ SetAccumulator(smi_int); __ Dispatch(); } // LdaConstant <idx> // // Load constant literal at |idx| in the constant pool into the accumulator. void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) { Node* index = __ BytecodeOperandIdx(0); Node* constant = __ LoadConstantPoolEntry(index); __ SetAccumulator(constant); __ Dispatch(); } // LdaUndefined // // Load Undefined into the accumulator. void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) { Node* undefined_value = __ HeapConstant(isolate_->factory()->undefined_value()); __ SetAccumulator(undefined_value); __ Dispatch(); } // LdaNull // // Load Null into the accumulator. void Interpreter::DoLdaNull(InterpreterAssembler* assembler) { Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); __ SetAccumulator(null_value); __ Dispatch(); } // LdaTheHole // // Load TheHole into the accumulator.
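// TheHole is the sentinel stored in let/const slots before their
// declaration executes; a later hole check turns a read in that window into
// a ReferenceError (the temporal dead zone). Sketch of the sentinel pattern
// (illustrative only, not V8's object representation):
#if 0
static const int kTheHole = 0;           // stands in for the unique hole object
const int* slot = &kTheHole;             // binding declared, not yet initialized
bool would_throw = (slot == &kTheHole);  // a hole check guards the access
#endif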
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) { Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value()); __ SetAccumulator(the_hole_value); __ Dispatch(); } // LdaTrue // // Load True into the accumulator. void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) { Node* true_value = __ HeapConstant(isolate_->factory()->true_value()); __ SetAccumulator(true_value); __ Dispatch(); } // LdaFalse // // Load False into the accumulator. void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) { Node* false_value = __ HeapConstant(isolate_->factory()->false_value()); __ SetAccumulator(false_value); __ Dispatch(); } // Ldar <src> // // Load accumulator with value from register <src>. void Interpreter::DoLdar(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* value = __ LoadRegister(reg_index); __ SetAccumulator(value); __ Dispatch(); } // Star <dst> // // Store accumulator to register <dst>. void Interpreter::DoStar(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* accumulator = __ GetAccumulator(); __ StoreRegister(accumulator, reg_index); __ Dispatch(); } // Mov <src> <dst> // // Stores the value of register <src> to register <dst>. void Interpreter::DoMov(InterpreterAssembler* assembler) { Node* src_index = __ BytecodeOperandReg(0); Node* src_value = __ LoadRegister(src_index); Node* dst_index = __ BytecodeOperandReg(1); __ StoreRegister(src_value, dst_index); __ Dispatch(); } void Interpreter::BuildLoadGlobal(int slot_operand_index, int name_operand_index, TypeofMode typeof_mode, InterpreterAssembler* assembler) { // Load the global via the LoadGlobalIC. Node* feedback_vector = __ LoadFeedbackVector(); Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index); AccessorAssembler accessor_asm(assembler->state()); Label try_handler(assembler, Label::kDeferred), miss(assembler, Label::kDeferred); // Fast path without frame construction for the data case. { Label done(assembler); Variable var_result(assembler, MachineRepresentation::kTagged); ExitPoint exit_point(assembler, &done, &var_result); accessor_asm.LoadGlobalIC_TryPropertyCellCase( feedback_vector, feedback_slot, &exit_point, &try_handler, &miss, CodeStubAssembler::INTPTR_PARAMETERS); __ Bind(&done); __ SetAccumulator(var_result.value()); __ Dispatch(); } // Slow path with frame construction. { Label done(assembler); Variable var_result(assembler, MachineRepresentation::kTagged); ExitPoint exit_point(assembler, &done, &var_result); __ Bind(&try_handler); { Node* context = __ GetContext(); Node* smi_slot = __ SmiTag(feedback_slot); Node* name_index = __ BytecodeOperandIdx(name_operand_index); Node* name = __ LoadConstantPoolEntry(name_index); AccessorAssembler::LoadICParameters params(context, nullptr, name, smi_slot, feedback_vector); accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode, &exit_point, &miss); } __ Bind(&miss); { Node* context = __ GetContext(); Node* smi_slot = __ SmiTag(feedback_slot); Node* name_index = __ BytecodeOperandIdx(name_operand_index); Node* name = __ LoadConstantPoolEntry(name_index); AccessorAssembler::LoadICParameters params(context, nullptr, name, smi_slot, feedback_vector); accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point); } __ Bind(&done); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } } // LdaGlobal <name_index> <slot> // // Load the global with name in constant pool entry <name_index> into the // accumulator using FeedBackVector slot <slot> outside of a typeof.
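// BuildLoadGlobal above is a three-tier lookup: a PropertyCell hit needs no
// frame, a cached handler still avoids the runtime, and only a miss falls
// back to the full lookup. A sketch of that shape (all names below are
// hypothetical stand-ins, not the AccessorAssembler API):
#if 0
#include <optional>
using Value = int;
std::optional<Value> TryPropertyCell(int) { return std::nullopt; }  // stub
std::optional<Value> TryHandler(int) { return std::nullopt; }       // stub
Value RuntimeMiss(int) { return 0; }                                // stub
Value LoadGlobal(int slot) {
  if (auto v = TryPropertyCell(slot)) return *v;  // fast: data case
  if (auto v = TryHandler(slot)) return *v;       // cached IC handler
  return RuntimeMiss(slot);                       // deferred: full lookup
}
#endif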
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF, assembler); } // LdaGlobalInsideTypeof // // Load the global with name in constant pool entry into the // accumulator using FeedBackVector slot inside of a typeof. void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF, assembler); } void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) { // Get the global object. Node* context = __ GetContext(); Node* native_context = __ LoadNativeContext(context); Node* global = __ LoadContextElement(native_context, Context::EXTENSION_INDEX); // Store the global via the StoreIC. Node* code_target = __ HeapConstant(ic.code()); Node* constant_index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(constant_index); Node* value = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(1); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); __ CallStub(ic.descriptor(), code_target, context, global, name, value, smi_slot, feedback_vector); __ Dispatch(); } // StaGlobalSloppy // // Store the value in the accumulator into the global with name in constant pool // entry using FeedBackVector slot in sloppy mode. void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY); DoStaGlobal(ic, assembler); } // StaGlobalStrict // // Store the value in the accumulator into the global with name in constant pool // entry using FeedBackVector slot in strict mode. void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT); DoStaGlobal(ic, assembler); } // LdaContextSlot // // Load the object in |slot_index| of the context at |depth| in the context // chain starting at |context| into the accumulator. void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* context = __ LoadRegister(reg_index); Node* slot_index = __ BytecodeOperandIdx(1); Node* depth = __ BytecodeOperandUImm(2); Node* slot_context = __ GetContextAtDepth(context, depth); Node* result = __ LoadContextElement(slot_context, slot_index); __ SetAccumulator(result); __ Dispatch(); } // LdaImmutableContextSlot // // Load the object in |slot_index| of the context at |depth| in the context // chain starting at |context| into the accumulator. void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) { // TODO(danno) Share the actual code object rather creating a duplicate one. DoLdaContextSlot(assembler); } // LdaCurrentContextSlot // // Load the object in |slot_index| of the current context into the accumulator. void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) { Node* slot_index = __ BytecodeOperandIdx(0); Node* slot_context = __ GetContext(); Node* result = __ LoadContextElement(slot_context, slot_index); __ SetAccumulator(result); __ Dispatch(); } // LdaImmutableCurrentContextSlot // // Load the object in |slot_index| of the current context into the accumulator. 
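// The context-slot handlers differ only in how many links of the context
// chain they walk first: zero for the *CurrentContextSlot variants, |depth|
// hops otherwise. Minimal model of GetContextAtDepth (stand-in types, not
// V8 code):
#if 0
using Value = int;
struct Context { Context* previous; Value* slots; };
Context* GetContextAtDepthModel(Context* context, int depth) {
  while (depth-- > 0) context = context->previous;  // one hop per scope level
  return context;
}
#endif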
void Interpreter::DoLdaImmutableCurrentContextSlot( InterpreterAssembler* assembler) { // TODO(danno) Share the actual code object rather creating a duplicate one. DoLdaCurrentContextSlot(assembler); } // StaContextSlot // // Stores the object in the accumulator into |slot_index| of the context at // |depth| in the context chain starting at |context|. void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* reg_index = __ BytecodeOperandReg(0); Node* context = __ LoadRegister(reg_index); Node* slot_index = __ BytecodeOperandIdx(1); Node* depth = __ BytecodeOperandUImm(2); Node* slot_context = __ GetContextAtDepth(context, depth); __ StoreContextElement(slot_context, slot_index, value); __ Dispatch(); } // StaCurrentContextSlot // // Stores the object in the accumulator into |slot_index| of the current // context. void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* slot_index = __ BytecodeOperandIdx(0); Node* slot_context = __ GetContext(); __ StoreContextElement(slot_context, slot_index, value); __ Dispatch(); } void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* name_index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(name_index); Node* context = __ GetContext(); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } // LdaLookupSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) { DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) { DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* context = __ GetContext(); Node* name_index = __ BytecodeOperandIdx(0); Node* slot_index = __ BytecodeOperandIdx(1); Node* depth = __ BytecodeOperandUImm(2); Label slowpath(assembler, Label::kDeferred); // Check for context extensions to allow the fast path. __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); // Fast path does a normal load context. { Node* slot_context = __ GetContextAtDepth(context, depth); Node* result = __ LoadContextElement(slot_context, slot_index); __ SetAccumulator(result); __ Dispatch(); } // Slow path when we have to call out to the runtime. __ Bind(&slowpath); { Node* name = __ LoadConstantPoolEntry(name_index); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } } // LdaLookupSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) { DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. 
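// The lookup-slot fast path above is only valid because no context between
// here and the target can shadow the name at runtime; a context extension
// (introduced by `with` or a sloppy direct eval) can, which is why its
// presence forces the runtime call. Sketch of the check (stand-in types):
#if 0
struct Context { Context* previous; void* extension; };
bool HasContextExtensionUpToDepth(Context* context, int depth) {
  for (; depth > 0; --depth, context = context->previous) {
    if (context->extension != nullptr) return true;  // must take the slow path
  }
  return false;
}
#endif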
void Interpreter::DoLdaLookupContextSlotInsideTypeof( InterpreterAssembler* assembler) { DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* context = __ GetContext(); Node* depth = __ BytecodeOperandUImm(2); Label slowpath(assembler, Label::kDeferred); // Check for context extensions to allow the fast path __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); // Fast path does a normal load global { static const int kNameOperandIndex = 0; static const int kSlotOperandIndex = 1; TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof ? INSIDE_TYPEOF : NOT_INSIDE_TYPEOF; BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode, assembler); } // Slow path when we have to call out to the runtime __ Bind(&slowpath); { Node* name_index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(name_index); Node* result = __ CallRuntime(function_id, context, name); __ SetAccumulator(result); __ Dispatch(); } } // LdaLookupGlobalSlot // // Lookup the object with the name in constant pool entry |name_index| // dynamically. void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) { DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler); } // LdaLookupGlobalSlotInsideTypeof // // Lookup the object with the name in constant pool entry |name_index| // dynamically without causing a NoReferenceError. void Interpreter::DoLdaLookupGlobalSlotInsideTypeof( InterpreterAssembler* assembler) { DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); } void Interpreter::DoStaLookupSlot(LanguageMode language_mode, InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* index = __ BytecodeOperandIdx(0); Node* name = __ LoadConstantPoolEntry(index); Node* context = __ GetContext(); Node* result = __ CallRuntime(is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict : Runtime::kStoreLookupSlot_Sloppy, context, name, value); __ SetAccumulator(result); __ Dispatch(); } // StaLookupSlotSloppy // // Store the object in accumulator to the object with the name in constant // pool entry |name_index| in sloppy mode. void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) { DoStaLookupSlot(LanguageMode::SLOPPY, assembler); } // StaLookupSlotStrict // // Store the object in accumulator to the object with the name in constant // pool entry |name_index| in strict mode. void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) { DoStaLookupSlot(LanguageMode::STRICT, assembler); } // LdaNamedProperty // // Calls the LoadIC at FeedBackVector slot for and the name at // constant pool entry . 
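// What the LoadIC buys: once feedback warms up, a monomorphic load is a map
// check plus a direct field load instead of a full property walk. A sketch
// of the cached fast case (hypothetical stand-ins, not the IC's real data
// layout):
#if 0
using Map = const void*;
using Value = int;
struct CachedLoad { Map map; int field_offset; };
bool TryCachedLoad(Map map, const CachedLoad& cache, const Value* fields,
                   Value* out) {
  if (map != cache.map) return false;  // shape changed: miss, patch the IC
  *out = fields[cache.field_offset];   // hit: one load, no lookup
  return true;
}
#endif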
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_); Node* code_target = __ HeapConstant(ic.code()); Node* register_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(register_index); Node* constant_index = __ BytecodeOperandIdx(1); Node* name = __ LoadConstantPoolEntry(constant_index); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallStub(ic.descriptor(), code_target, context, object, name, smi_slot, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } // KeyedLoadIC // // Calls the KeyedLoadIC at FeedBackVector slot for and the key // in the accumulator. void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_); Node* code_target = __ HeapConstant(ic.code()); Node* reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(reg_index); Node* name = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(1); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallStub(ic.descriptor(), code_target, context, object, name, smi_slot, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) { Node* code_target = __ HeapConstant(ic.code()); Node* object_reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(object_reg_index); Node* constant_index = __ BytecodeOperandIdx(1); Node* name = __ LoadConstantPoolEntry(constant_index); Node* value = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot, feedback_vector); __ Dispatch(); } // StaNamedPropertySloppy // // Calls the sloppy mode StoreIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY); DoStoreIC(ic, assembler); } // StaNamedPropertyStrict // // Calls the strict mode StoreIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT); DoStoreIC(ic, assembler); } // StaNamedOwnProperty // // Calls the StoreOwnIC at FeedBackVector slot for and // the name in constant pool entry with the value in the // accumulator. 
void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) { Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_); DoStoreIC(ic, assembler); } void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) { Node* code_target = __ HeapConstant(ic.code()); Node* object_reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(object_reg_index); Node* name_reg_index = __ BytecodeOperandReg(1); Node* name = __ LoadRegister(name_reg_index); Node* value = __ GetAccumulator(); Node* raw_slot = __ BytecodeOperandIdx(2); Node* smi_slot = __ SmiTag(raw_slot); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallStub(ic.descriptor(), code_target, context, object, name, value, smi_slot, feedback_vector); __ Dispatch(); } // StaKeyedPropertySloppy <object> <key> <slot> // // Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for <object> // and the key <key> with the value in the accumulator. void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY); DoKeyedStoreIC(ic, assembler); } // StaKeyedPropertyStrict <object> <key> <slot> // // Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for <object> // and the key <key> with the value in the accumulator. void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) { Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT); DoKeyedStoreIC(ic, assembler); } // StaDataPropertyInLiteral <object> <name> <flags> // // Define a property <name> with value from the accumulator in <object>. // Property attributes and whether set_function_name are stored in // DataPropertyInLiteralFlags <flags>. // // This definition is not observable and is used only for definitions // in object or class literals. void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) { Node* object = __ LoadRegister(__ BytecodeOperandReg(0)); Node* name = __ LoadRegister(__ BytecodeOperandReg(1)); Node* value = __ GetAccumulator(); Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2)); Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3)); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name, value, flags, feedback_vector, vector_index); __ Dispatch(); } // LdaModuleVariable <cell_index> <depth> // // Load the contents of a module variable into the accumulator. The variable // is identified by <cell_index>. <depth> is the depth of the current context // relative to the module context. void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) { Node* cell_index = __ BytecodeOperandImmIntPtr(0); Node* depth = __ BytecodeOperandUImm(1); Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); Node* module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); Label if_export(assembler), if_import(assembler), end(assembler); __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, &if_import); __ Bind(&if_export); { Node* regular_exports = __ LoadObjectField(module, Module::kRegularExportsOffset); // The actual array index is (cell_index - 1). Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1)); Node* cell = __ LoadFixedArrayElement(regular_exports, export_index); __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); __ Goto(&end); } __ Bind(&if_import); { Node* regular_imports = __ LoadObjectField(module, Module::kRegularImportsOffset); // The actual array index is (-cell_index - 1).
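// Module cells use a signed, 1-based index so one operand can address both
// arrays: exports are positive, imports negative. Both map onto 0-based
// array indices as sketched here:
#if 0
int ExportArrayIndex(int cell_index) { return cell_index - 1; }   // cell_index > 0
int ImportArrayIndex(int cell_index) { return -cell_index - 1; }  // cell_index < 0
#endif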
Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index); Node* cell = __ LoadFixedArrayElement(regular_imports, import_index); __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); __ Goto(&end); } __ Bind(&end); __ Dispatch(); } // StaModuleVariable <cell_index> <depth> // // Store accumulator to the module variable identified by <cell_index>. // <depth> is the depth of the current context relative to the module context. void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* cell_index = __ BytecodeOperandImmIntPtr(0); Node* depth = __ BytecodeOperandUImm(1); Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); Node* module = __ LoadContextElement(module_context, Context::EXTENSION_INDEX); Label if_export(assembler), if_import(assembler), end(assembler); __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, &if_import); __ Bind(&if_export); { Node* regular_exports = __ LoadObjectField(module, Module::kRegularExportsOffset); // The actual array index is (cell_index - 1). Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1)); Node* cell = __ LoadFixedArrayElement(regular_exports, export_index); __ StoreObjectField(cell, Cell::kValueOffset, value); __ Goto(&end); } __ Bind(&if_import); { // Not supported (probably never). __ Abort(kUnsupportedModuleOperation); __ Goto(&end); } __ Bind(&end); __ Dispatch(); } // PushContext <context> // // Saves the current context in <context>, and pushes the accumulator as the // new current context. void Interpreter::DoPushContext(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* new_context = __ GetAccumulator(); Node* old_context = __ GetContext(); __ StoreRegister(old_context, reg_index); __ SetContext(new_context); __ Dispatch(); } // PopContext <context> // // Pops the current context and sets <context> as the new context. void Interpreter::DoPopContext(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* context = __ LoadRegister(reg_index); __ SetContext(context); __ Dispatch(); } // TODO(mythria): Remove this function once all CompareOps record type feedback.
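// DoCompareOp below handles the two comparison operators that collect no
// feedback yet: `key in obj` is a property lookup, and `x instanceof C`
// walks x's prototype chain looking for C.prototype. Minimal model of the
// latter (stand-in types; ignores Symbol.hasInstance):
#if 0
struct Proto { const Proto* parent; };
bool InstanceOfModel(const Proto* object, const Proto* ctor_prototype) {
  for (const Proto* p = object->parent; p != nullptr; p = p->parent) {
    if (p == ctor_prototype) return true;
  }
  return false;
}
#endif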
void Interpreter::DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* lhs = __ LoadRegister(reg_index); Node* rhs = __ GetAccumulator(); Node* context = __ GetContext(); Node* result; switch (compare_op) { case Token::IN: result = assembler->HasProperty(rhs, lhs, context); break; case Token::INSTANCEOF: result = assembler->InstanceOf(lhs, rhs, context); break; default: UNREACHABLE(); } __ SetAccumulator(result); __ Dispatch(); } template void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* lhs = __ LoadRegister(reg_index); Node* rhs = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(1); Node* feedback_vector = __ LoadFeedbackVector(); Node* result = Generator::Generate(assembler, lhs, rhs, slot_index, feedback_vector, context); __ SetAccumulator(result); __ Dispatch(); } void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op, InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* lhs = __ LoadRegister(reg_index); Node* rhs = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(1); Node* feedback_vector = __ LoadFeedbackVector(); // TODO(interpreter): the only reason this check is here is because we // sometimes emit comparisons that shouldn't collect feedback (e.g. // try-finally blocks and generators), and we could get rid of this by // introducing Smi equality tests. Label gather_type_feedback(assembler), do_compare(assembler); __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare, &gather_type_feedback); __ Bind(&gather_type_feedback); { Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler), lhs_is_not_string(assembler), gather_rhs_type(assembler), update_feedback(assembler); __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi); var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kSignedSmall)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_smi); { Node* lhs_map = __ LoadMap(lhs); __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number); var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_number); { Node* lhs_instance_type = __ LoadInstanceType(lhs); if (Token::IsOrderedRelationalCompareOp(compare_op)) { Label lhs_is_not_oddball(assembler); __ GotoIfNot( __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)), &lhs_is_not_oddball); var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)); __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_oddball); } Label lhs_is_not_string(assembler); __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type), &lhs_is_not_string); if (Token::IsOrderedRelationalCompareOp(compare_op)) { var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kString)); } else { var_type_feedback.Bind(__ SelectSmiConstant( __ Word32Equal( __ Word32And(lhs_instance_type, __ Int32Constant(kIsNotInternalizedMask)), __ Int32Constant(kInternalizedTag)), CompareOperationFeedback::kInternalizedString, CompareOperationFeedback::kString)); } __ Goto(&gather_rhs_type); __ Bind(&lhs_is_not_string); if (Token::IsEqualityOp(compare_op)) { var_type_feedback.Bind(__ SelectSmiConstant( __ IsJSReceiverInstanceType(lhs_instance_type), CompareOperationFeedback::kReceiver, 
CompareOperationFeedback::kAny)); } else { var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kAny)); } __ Goto(&gather_rhs_type); } } __ Bind(&gather_rhs_type); { Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler); __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi); var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kSignedSmall))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_smi); { Node* rhs_map = __ LoadMap(rhs); __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number); var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kNumber))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_number); { Node* rhs_instance_type = __ LoadInstanceType(rhs); if (Token::IsOrderedRelationalCompareOp(compare_op)) { Label rhs_is_not_oddball(assembler); __ GotoIfNot(__ Word32Equal(rhs_instance_type, __ Int32Constant(ODDBALL_TYPE)), &rhs_is_not_oddball); var_type_feedback.Bind(__ SmiOr( var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kNumberOrOddball))); __ Goto(&update_feedback); __ Bind(&rhs_is_not_oddball); } Label rhs_is_not_string(assembler); __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type), &rhs_is_not_string); if (Token::IsOrderedRelationalCompareOp(compare_op)) { var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SmiConstant(CompareOperationFeedback::kString))); } else { var_type_feedback.Bind(__ SmiOr( var_type_feedback.value(), __ SelectSmiConstant( __ Word32Equal( __ Word32And(rhs_instance_type, __ Int32Constant(kIsNotInternalizedMask)), __ Int32Constant(kInternalizedTag)), CompareOperationFeedback::kInternalizedString, CompareOperationFeedback::kString))); } __ Goto(&update_feedback); __ Bind(&rhs_is_not_string); if (Token::IsEqualityOp(compare_op)) { var_type_feedback.Bind( __ SmiOr(var_type_feedback.value(), __ SelectSmiConstant( __ IsJSReceiverInstanceType(rhs_instance_type), CompareOperationFeedback::kReceiver, CompareOperationFeedback::kAny))); } else { var_type_feedback.Bind( __ SmiConstant(CompareOperationFeedback::kAny)); } __ Goto(&update_feedback); } } } __ Bind(&update_feedback); { __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ Goto(&do_compare); } } __ Bind(&do_compare); Node* result; switch (compare_op) { case Token::EQ: result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs, context); break; case Token::NE: result = assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context); break; case Token::EQ_STRICT: result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs, rhs, context); break; case Token::LT: result = assembler->RelationalComparison(CodeStubAssembler::kLessThan, lhs, rhs, context); break; case Token::GT: result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan, lhs, rhs, context); break; case Token::LTE: result = assembler->RelationalComparison( CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context); break; case Token::GTE: result = assembler->RelationalComparison( CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context); break; default: UNREACHABLE(); } __ SetAccumulator(result); __ Dispatch(); } // Add // // Add register to accumulator. void Interpreter::DoAdd(InterpreterAssembler* assembler) { DoBinaryOpWithFeedback(assembler); } // Sub // // Subtract register from accumulator. 
void Interpreter::DoSub(InterpreterAssembler* assembler) { DoBinaryOpWithFeedback(assembler); } // Mul // // Multiply accumulator by register . void Interpreter::DoMul(InterpreterAssembler* assembler) { DoBinaryOpWithFeedback(assembler); } // Div // // Divide register by accumulator. void Interpreter::DoDiv(InterpreterAssembler* assembler) { DoBinaryOpWithFeedback(assembler); } // Mod // // Modulo register by accumulator. void Interpreter::DoMod(InterpreterAssembler* assembler) { DoBinaryOpWithFeedback(assembler); } void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op, InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* lhs = __ LoadRegister(reg_index); Node* rhs = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(1); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned), var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, lhs, &var_lhs_type_feedback); Node* rhs_value = __ TruncateTaggedToWord32WithFeedback( context, rhs, &var_rhs_type_feedback); Node* result = nullptr; switch (bitwise_op) { case Token::BIT_OR: { Node* value = __ Word32Or(lhs_value, rhs_value); result = __ ChangeInt32ToTagged(value); } break; case Token::BIT_AND: { Node* value = __ Word32And(lhs_value, rhs_value); result = __ ChangeInt32ToTagged(value); } break; case Token::BIT_XOR: { Node* value = __ Word32Xor(lhs_value, rhs_value); result = __ ChangeInt32ToTagged(value); } break; case Token::SHL: { Node* value = __ Word32Shl( lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); result = __ ChangeInt32ToTagged(value); } break; case Token::SHR: { Node* value = __ Word32Shr( lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); result = __ ChangeUint32ToTagged(value); } break; case Token::SAR: { Node* value = __ Word32Sar( lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); result = __ ChangeInt32ToTagged(value); } break; default: UNREACHABLE(); } Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); if (FLAG_debug_code) { Label ok(assembler); __ GotoIf(__ TaggedIsSmi(result), &ok); Node* result_map = __ LoadMap(result); __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(), kExpectedHeapNumber); __ Goto(&ok); __ Bind(&ok); } Node* input_feedback = __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value()); __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // BitwiseOr // // BitwiseOr register to accumulator. void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::BIT_OR, assembler); } // BitwiseXor // // BitwiseXor register to accumulator. void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::BIT_XOR, assembler); } // BitwiseAnd // // BitwiseAnd register to accumulator. void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::BIT_AND, assembler); } // ShiftLeft // // Left shifts register by the count specified in the accumulator. // Register is converted to an int32 and the accumulator to uint32 // before the operation. 5 lsb bits from the accumulator are used as count // i.e. << (accumulator & 0x1F). 
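// JS shift semantics use only the low five bits of the count, i.e. the
// count is taken mod 32; the shift handlers implement exactly this masking.
// A standalone sketch (the cast through uint32_t sidesteps C++ signed
// overflow UB):
#if 0
#include <cstdint>
int32_t ShlJS(int32_t lhs, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (count & 0x1f));
}
uint32_t ShrLogicalJS(int32_t lhs, int32_t count) {
  return static_cast<uint32_t>(lhs) >> (count & 0x1f);  // zero-filling
}
int32_t SarJS(int32_t lhs, int32_t count) {
  return lhs >> (count & 0x1f);  // sign-extending (arithmetic shift)
}
#endif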
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SHL, assembler); } // ShiftRight // // Right shifts register by the count specified in the accumulator. // Result is sign extended. Register is converted to an int32 and the // accumulator to uint32 before the operation. 5 lsb bits from the accumulator // are used as count i.e. >> (accumulator & 0x1F). void Interpreter::DoShiftRight(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SAR, assembler); } // ShiftRightLogical // // Right Shifts register by the count specified in the accumulator. // Result is zero-filled. The accumulator and register are converted to // uint32 before the operation 5 lsb bits from the accumulator are used as // count i.e. << (accumulator & 0x1F). void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SHR, assembler); } // AddSmi // // Adds an immediate value to register . For this // operation is the lhs operand and is the operand. void Interpreter::DoAddSmi(InterpreterAssembler* assembler) { Variable var_result(assembler, MachineRepresentation::kTagged); Label fastpath(assembler), slowpath(assembler, Label::kDeferred), end(assembler); Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); // {right} is known to be a Smi. // Check if the {left} is a Smi take the fast path. __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Bind(&fastpath); { // Try fast Smi addition first. Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left), __ BitcastTaggedToWord(right)); Node* overflow = __ Projection(1, pair); // Check if the Smi additon overflowed. Label if_notoverflow(assembler); __ Branch(overflow, &slowpath, &if_notoverflow); __ Bind(&if_notoverflow); { __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), feedback_vector, slot_index); var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); __ Goto(&end); } } __ Bind(&slowpath); { Node* context = __ GetContext(); AddWithFeedbackStub stub(__ isolate()); Callable callable = Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate())); var_result.Bind(__ CallStub(callable, context, left, right, __ TruncateWordToWord32(slot_index), feedback_vector)); __ Goto(&end); } __ Bind(&end); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } // SubSmi // // Subtracts an immediate value to register . For this // operation is the lhs operand and is the rhs operand. void Interpreter::DoSubSmi(InterpreterAssembler* assembler) { Variable var_result(assembler, MachineRepresentation::kTagged); Label fastpath(assembler), slowpath(assembler, Label::kDeferred), end(assembler); Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); // {right} is known to be a Smi. // Check if the {left} is a Smi take the fast path. __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Bind(&fastpath); { // Try fast Smi subtraction first. Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left), __ BitcastTaggedToWord(right)); Node* overflow = __ Projection(1, pair); // Check if the Smi subtraction overflowed. 
Label if_notoverflow(assembler); __ Branch(overflow, &slowpath, &if_notoverflow); __ Bind(&if_notoverflow); { __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), feedback_vector, slot_index); var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); __ Goto(&end); } } __ Bind(&slowpath); { Node* context = __ GetContext(); SubtractWithFeedbackStub stub(__ isolate()); Callable callable = Callable( stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate())); var_result.Bind(__ CallStub(callable, context, left, right, __ TruncateWordToWord32(slot_index), feedback_vector)); __ Goto(&end); } __ Bind(&end); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } // BitwiseOr // // BitwiseOr with . For this operation is the lhs // operand and is the rhs operand. void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32Or(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // BitwiseAnd // // BitwiseAnd with . For this operation is the lhs // operand and is the rhs operand. void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32And(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // ShiftLeftSmi // // Left shifts register by the count specified in . // Register is converted to an int32 before the operation. The 5 // lsb bits from are used as count i.e. << ( & 0x1F). 
// ShiftLeftSmi
//
// Left shifts register <reg> by the count specified in <imm>.
// Register <reg> is converted to an int32 before the operation. The 5
// lsb bits from <imm> are used as count i.e. <reg> << (<imm> & 0x1F).
void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();
  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, left, &var_lhs_type_feedback);
  Node* rhs_value = __ SmiToWord32(right);
  Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
  Node* value = __ Word32Shl(lhs_value, shift_count);
  Node* result = __ ChangeInt32ToTagged(value);
  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);
  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
                    feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ShiftRightSmi
//
// Right shifts register <reg> by the count specified in <imm>.
// Register <reg> is converted to an int32 before the operation. The 5
// lsb bits from <imm> are used as count i.e. <reg> >> (<imm> & 0x1F).
void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(1);
  Node* left = __ LoadRegister(reg_index);
  Node* right = __ BytecodeOperandImmSmi(0);
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(2);
  Node* feedback_vector = __ LoadFeedbackVector();
  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, left, &var_lhs_type_feedback);
  Node* rhs_value = __ SmiToWord32(right);
  Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
  Node* value = __ Word32Sar(lhs_value, shift_count);
  Node* result = __ ChangeInt32ToTagged(value);
  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);
  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
                    feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

Node* Interpreter::BuildUnaryOp(Callable callable,
                                InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  return __ CallStub(callable.descriptor(), target, context, accumulator);
}

template <class Generator>
void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* result = Generator::Generate(assembler, value, context,
                                     feedback_vector, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ToName
//
// Convert the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ ToName(context, object);
  __ StoreRegister(result, __ BytecodeOperandReg(0));
  __ Dispatch();
}

// ToNumber
//
// Convert the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) { Node* object = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ ToNumber(context, object); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // ToObject // // Convert the object referenced by the accumulator to a JSReceiver. void Interpreter::DoToObject(InterpreterAssembler* assembler) { Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // Inc // // Increments value in the accumulator by one. void Interpreter::DoInc(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point increment. Label do_finc(assembler), end(assembler); Variable var_finc_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); value_var.Bind(value); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi addition first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrAddWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi addition overflowed. Label if_overflow(assembler), if_notoverflow(assembler); assembler->Branch(overflow, &if_overflow, &if_notoverflow); assembler->Bind(&if_notoverflow); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); result_var.Bind( assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); assembler->Goto(&end); assembler->Bind(&if_overflow); { var_finc_value.Bind(assembler->SmiToFloat64(value)); assembler->Goto(&do_finc); } } assembler->Bind(&if_isnotsmi); { // Check if the value is a HeapNumber. Label if_valueisnumber(assembler), if_valuenotnumber(assembler, Label::kDeferred); Node* value_map = assembler->LoadMap(value); assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); assembler->Bind(&if_valueisnumber); { // Load the HeapNumber value. var_finc_value.Bind(assembler->LoadHeapNumberValue(value)); assembler->Goto(&do_finc); } assembler->Bind(&if_valuenotnumber); { // We do not require an Or with earlier feedback here because once we // convert the value to a number, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. 
CSA_ASSERT(assembler, assembler->SmiEqual( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNone))); Label if_valueisoddball(assembler), if_valuenotoddball(assembler); Node* instance_type = assembler->LoadMapInstanceType(value_map); Node* is_oddball = assembler->Word32Equal( instance_type, assembler->Int32Constant(ODDBALL_TYPE)); assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); assembler->Bind(&if_valueisoddball); { // Convert Oddball to Number and check again. value_var.Bind( assembler->LoadObjectField(value, Oddball::kToNumberOffset)); var_type_feedback.Bind(assembler->SmiConstant( BinaryOperationFeedback::kNumberOrOddball)); assembler->Goto(&start); } assembler->Bind(&if_valuenotoddball); { // Convert to a Number first and try again. Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate()); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kAny)); value_var.Bind(assembler->CallStub(callable, context, value)); assembler->Goto(&start); } } } } assembler->Bind(&do_finc); { Node* finc_value = var_finc_value.value(); Node* one = assembler->Float64Constant(1.0); Node* finc_result = assembler->Float64Add(finc_value, one); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNumber))); result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result)); assembler->Goto(&end); } assembler->Bind(&end); assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ SetAccumulator(result_var.value()); __ Dispatch(); } // Dec // // Decrements value in the accumulator by one. void Interpreter::DoDec(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point decrement. Label do_fdec(assembler), end(assembler); Variable var_fdec_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); value_var.Bind(value); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi subtraction first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrSubWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi subtraction overflowed. 
    Label if_overflow(assembler), if_notoverflow(assembler);
    assembler->Branch(overflow, &if_overflow, &if_notoverflow);
    assembler->Bind(&if_notoverflow);
    var_type_feedback.Bind(assembler->SmiOr(
        var_type_feedback.value(),
        assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
    result_var.Bind(
        assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
    assembler->Goto(&end);
    assembler->Bind(&if_overflow);
    {
      var_fdec_value.Bind(assembler->SmiToFloat64(value));
      assembler->Goto(&do_fdec);
    }
  }
  assembler->Bind(&if_isnotsmi);
  {
    // Check if the value is a HeapNumber.
    Label if_valueisnumber(assembler),
        if_valuenotnumber(assembler, Label::kDeferred);
    Node* value_map = assembler->LoadMap(value);
    assembler->Branch(assembler->IsHeapNumberMap(value_map),
                      &if_valueisnumber, &if_valuenotnumber);
    assembler->Bind(&if_valueisnumber);
    {
      // Load the HeapNumber value.
      var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
      assembler->Goto(&do_fdec);
    }
    assembler->Bind(&if_valuenotnumber);
    {
      // We do not require an Or with earlier feedback here because once we
      // convert the value to a number, we cannot reach this path. We can
      // only reach this path on the first pass when the feedback is kNone.
      CSA_ASSERT(assembler,
                 assembler->SmiEqual(
                     var_type_feedback.value(),
                     assembler->SmiConstant(BinaryOperationFeedback::kNone)));
      Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
      Node* instance_type = assembler->LoadMapInstanceType(value_map);
      Node* is_oddball = assembler->Word32Equal(
          instance_type, assembler->Int32Constant(ODDBALL_TYPE));
      assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
      assembler->Bind(&if_valueisoddball);
      {
        // Convert Oddball to Number and check again.
        value_var.Bind(
            assembler->LoadObjectField(value, Oddball::kToNumberOffset));
        var_type_feedback.Bind(assembler->SmiConstant(
            BinaryOperationFeedback::kNumberOrOddball));
        assembler->Goto(&start);
      }
      assembler->Bind(&if_valuenotoddball);
      {
        // Convert to a Number first and try again.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_type_feedback.Bind(
            assembler->SmiConstant(BinaryOperationFeedback::kAny));
        value_var.Bind(assembler->CallStub(callable, context, value));
        assembler->Goto(&start);
      }
    }
  }
}
assembler->Bind(&do_fdec);
{
  Node* fdec_value = var_fdec_value.value();
  Node* one = assembler->Float64Constant(1.0);
  Node* fdec_result = assembler->Float64Sub(fdec_value, one);
  var_type_feedback.Bind(assembler->SmiOr(
      var_type_feedback.value(),
      assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
  result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
  assembler->Goto(&end);
}
assembler->Bind(&end);
assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                          slot_index);
__ SetAccumulator(result_var.value());
__ Dispatch();
}
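// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): DoInc and
// DoDec above accumulate BinaryOperationFeedback by Smi-OR-ing flags into
// var_type_feedback, so the recorded feedback only ever widens. A model of
// that lattice as plain bit flags; the exact flag values here are
// illustrative, not V8's.
#include <cstdint>
namespace feedback_model {
enum Feedback : uint32_t {
  kNone = 0,
  kSignedSmall = 1 << 0,
  kNumber = 1 << 1,
  kNumberOrOddball = 1 << 2,
  kAny = 0xFF,
};
inline uint32_t Combine(uint32_t a, uint32_t b) { return a | b; }
}  // namespace feedback_model
// A Smi pass followed by a double pass yields kSignedSmall | kNumber, so a
// later optimizing compile sees that both cases actually occurred.
// ---------------------------------------------------------------------------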
// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  __ SetAccumulator(result.value());
  __ Dispatch();
}

// LogicalNot
//
// Perform logical-not on the accumulator, which must already be a boolean
// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    if (FLAG_debug_code) {
      __ AbortIfWordNotEqual(value, false_value,
                             BailoutReason::kExpectedBooleanValue);
    }
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  __ SetAccumulator(result.value());
  __ Dispatch();
}

// TypeOf
//
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = assembler->Typeof(value, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoDelete(Runtime::FunctionId function_id,
                           InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* key = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, object, key);
  __ SetAccumulator(result);
  __ Dispatch();
}

// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}

// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}
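// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter):
// BranchIfToBooleanIsTrue in DoToBooleanLogicalNot above implements the
// JavaScript ToBoolean test. A simplified model over a toy tagged value
// (strings and objects reduced to a single truthy case; names hypothetical):
#include <cmath>
namespace boolean_model {
struct Value {
  bool is_undefined, is_null, is_bool, bool_val, is_number;
  double num_val;
};
inline bool ToBoolean(const Value& v) {
  if (v.is_undefined || v.is_null) return false;
  if (v.is_bool) return v.bool_val;
  if (v.is_number) return v.num_val != 0.0 && !std::isnan(v.num_val);
  return true;  // Objects and non-empty strings are truthy
                // (the empty-string case is omitted in this toy type).
}
}  // namespace boolean_model
// ---------------------------------------------------------------------------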
// GetSuperConstructor
//
// Get the super constructor from the object referenced by the accumulator.
// The result is stored in register |reg|.
void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
  Node* active_function = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ GetSuperConstructor(active_function, context);
  Node* reg = __ BytecodeOperandReg(0);
  __ StoreRegister(result, reg);
  __ Dispatch();
}

void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                           TailCallMode tail_call_mode) {
  Node* function_reg = __ BytecodeOperandReg(0);
  Node* function = __ LoadRegister(function_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* slot_id = __ BytecodeOperandIdx(3);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result =
      __ CallJSWithFeedback(function, context, receiver_arg, args_count,
                            slot_id, feedback_vector, tail_call_mode);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Call
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers. Collect type feedback
// into |feedback_slot_id|.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// CallProperty
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers. Collect type feedback into
// |feedback_slot_id|. The callable is known to be a property of the receiver.
void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
  // TODO(leszeks): Look into making the interpreter use the fact that the
  // receiver is non-null.
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// TailCall
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers. Collect type feedback
// into |feedback_slot_id|.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kAllow);
}

// CallRuntime
//
// Call the runtime function |function_id| with the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// InvokeIntrinsic
//
// Implements the semantic equivalent of calling the runtime function
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandIntrinsicId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* arg_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  IntrinsicsHelper helper(assembler);
  Node* result =
      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
  __ SetAccumulator(result);
  __ Dispatch();
}
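// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): DoJSCall
// above passes the *location* of the receiver register plus an argument
// count of `receiver_args_count - 1`, because the receiver and arguments
// occupy consecutive registers. A toy register-file model of that layout
// (all names hypothetical):
#include <cstddef>
namespace call_model {
struct Registers {
  const double* base;  // Toy register file of doubles.
};
// The receiver lives at register `first`; arguments follow at
// first + 1 .. first + args_count.
inline double SumArgs(const Registers& regs, size_t first,
                      int receiver_args_count) {
  int args_count = receiver_args_count - 1;  // Strip the receiver.
  double sum = 0;
  for (int i = 1; i <= args_count; ++i) sum += regs.base[first + i];
  return sum;
}
}  // namespace call_model
// ---------------------------------------------------------------------------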
// CallRuntimeForPair
//
// Call the runtime function |function_id| which returns a pair, with the
// first argument in register |first_arg| and |arg_count| arguments in
// subsequent registers. Returns the result in <first_return> and
// <first_return + 1>.
void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
  // Call the runtime function.
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result_pair =
      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);

  // Store the results in <first_return> and <first_return + 1>.
  Node* first_return_reg = __ BytecodeOperandReg(3);
  Node* second_return_reg = __ NextRegister(first_return_reg);
  Node* result0 = __ Projection(0, result_pair);
  Node* result1 = __ Projection(1, result_pair);
  __ StoreRegister(result0, first_return_reg);
  __ StoreRegister(result1, second_return_reg);
  __ Dispatch();
}

// CallJSRuntime
//
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
  Node* context_index = __ BytecodeOperandIdx(0);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);

  // Get the function to call from the native context.
  Node* context = __ GetContext();
  Node* native_context = __ LoadNativeContext(context);
  Node* function = __ LoadContextElement(native_context, context_index);

  // Call the function.
  Node* result = __ CallJS(function, context, first_arg, args_count,
                           TailCallMode::kDisallow);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallWithSpread
//
// Call a JSFunction or Callable in |callable| with the receiver in
// |first_arg| and |arg_count - 1| arguments in subsequent registers. The
// final argument is always a spread.
void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) {
  Node* callable_reg = __ BytecodeOperandReg(0);
  Node* callable = __ LoadRegister(callable_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* context = __ GetContext();

  // Call into the runtime function CallWithSpread, which handles everything,
  // including the spread.
  Node* result =
      __ CallJSWithSpread(callable, context, receiver_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ConstructWithSpread
//
// Call the constructor in |constructor| with the first argument in register
// |first_arg| and |arg_count| arguments in subsequent registers. The final
// argument is always a spread. The new.target is in the accumulator.
void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) {
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ ConstructWithSpread(constructor, context, new_target,
                                        first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}
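// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter):
// CallRuntimeForPair above unpacks a two-word runtime result, via
// Projection(0) and Projection(1), into the consecutive registers
// <first_return> and <first_return + 1>. A std::pair analogue over a toy
// register array:
#include <cstddef>
#include <utility>
namespace pair_model {
template <typename T, size_t N>
void StorePair(T (&regs)[N], size_t first_return, std::pair<T, T> result) {
  regs[first_return] = result.first;       // Projection 0.
  regs[first_return + 1] = result.second;  // Projection 1.
}
}  // namespace pair_model
// ---------------------------------------------------------------------------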
// Construct
//
// Call operator construct with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
void Interpreter::DoConstruct(InterpreterAssembler* assembler) {
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* slot_id = __ BytecodeOperandIdx(3);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ Construct(constructor, context, new_target, first_arg,
                              args_count, slot_id, feedback_vector);
  __ SetAccumulator(result);
  __ Dispatch();
}

// TestEqual
//
// Test if the value in the register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::EQ, assembler);
}

// TestNotEqual
//
// Test if the value in the register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::NE, assembler);
}

// TestEqualStrict
//
// Test if the value in the register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
}

// TestLessThan
//
// Test if the value in the register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::LT, assembler);
}

// TestGreaterThan
//
// Test if the value in the register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::GT, assembler);
}

// TestLessThanOrEqual
//
// Test if the value in the register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::LTE, assembler);
}

// TestGreaterThanOrEqual
//
// Test if the value in the register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
  DoCompareOpWithFeedback(Token::Value::GTE, assembler);
}
Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();
  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys on the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of each inner object are the
  // destinations of the dispatch, and the associated value is a counter for
  // the corresponding source-destination dispatch chain.
  //
  // Only non-zero counters are written to file, but an entry in the top-level
  // object is always present, even if the value is empty because all counters
  // for that source are zero.
  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();
    CHECK(counters_map
              ->DefineOwnProperty(context, from_name_object, counters_row)
              .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* smi_int = __ BytecodeOperandImmSmi(0);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}
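// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): the structure
// GetDispatchCountersObject builds above, modelled with standard containers.
// Only non-zero counters appear in each row, but every source bytecode gets
// a (possibly empty) row, mirroring the loop above.
#include <cstdint>
#include <map>
#include <string>
namespace counters_model {
using Row = std::map<std::string, uintptr_t>;
using Table = std::map<std::string, Row>;
inline Table Build(const uintptr_t* counters, const std::string* names,
                   int n) {
  Table table;
  for (int from = 0; from < n; ++from) {
    Row& row = table[names[from]];  // The row exists even if it stays empty.
    for (int to = 0; to < n; ++to) {
      uintptr_t c = counters[from * n + to];
      if (c > 0) row[names[to]] = c;
    }
  }
  return table;
}
}  // namespace counters_model
// ---------------------------------------------------------------------------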
// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}

// Ldar
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}
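// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): the
// accumulator-plus-register-file moves implemented by Ldar, Star and Mov
// above, over a toy machine state:
#include <vector>
namespace regfile_model {
struct State {
  double accumulator = 0;
  std::vector<double> regs = std::vector<double>(16);
};
inline void Ldar(State& s, int src) { s.accumulator = s.regs[src]; }
inline void Star(State& s, int dst) { s.regs[dst] = s.accumulator; }
inline void Mov(State& s, int src, int dst) { s.regs[dst] = s.regs[src]; }
}  // namespace regfile_model
// ---------------------------------------------------------------------------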
void Interpreter::BuildLoadGlobal(int slot_operand_index,
                                  int name_operand_index,
                                  TypeofMode typeof_mode,
                                  InterpreterAssembler* assembler) {
  // Load the global via the LoadGlobalIC.
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);

  AccessorAssembler accessor_asm(assembler->state());
  Label try_handler(assembler, Label::kDeferred),
      miss(assembler, Label::kDeferred);

  // Fast path without frame construction for the data case.
  {
    Label done(assembler);
    Variable var_result(assembler, MachineRepresentation::kTagged);
    ExitPoint exit_point(assembler, &done, &var_result);

    accessor_asm.LoadGlobalIC_TryPropertyCellCase(
        feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
        CodeStubAssembler::INTPTR_PARAMETERS);

    __ Bind(&done);
    __ SetAccumulator(var_result.value());
    __ Dispatch();
  }

  // Slow path with frame construction.
  {
    Label done(assembler);
    Variable var_result(assembler, MachineRepresentation::kTagged);
    ExitPoint exit_point(assembler, &done, &var_result);

    __ Bind(&try_handler);
    {
      Node* context = __ GetContext();
      Node* smi_slot = __ SmiTag(feedback_slot);
      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
      Node* name = __ LoadConstantPoolEntry(name_index);

      AccessorAssembler::LoadICParameters params(context, nullptr, name,
                                                 smi_slot, feedback_vector);
      accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
                                               &exit_point, &miss);
    }

    __ Bind(&miss);
    {
      Node* context = __ GetContext();
      Node* smi_slot = __ SmiTag(feedback_slot);
      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
      Node* name = __ LoadConstantPoolEntry(name_index);

      AccessorAssembler::LoadICParameters params(context, nullptr, name,
                                                 smi_slot, feedback_vector);
      accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
    }

    __ Bind(&done);
    {
      __ SetAccumulator(var_result.value());
      __ Dispatch();
    }
  }
}

// LdaGlobal
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  static const int kNameOperandIndex = 0;
  static const int kSlotOperandIndex = 1;
  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
                  assembler);
}

// LdaGlobalInsideTypeof
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  static const int kNameOperandIndex = 0;
  static const int kSlotOperandIndex = 1;
  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
                  assembler);
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context = __ LoadNativeContext(context);
  Node* global = __ LoadContextElement(native_context,
                                       Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}
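// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter):
// BuildLoadGlobal above tries a property-cell fast path first and only falls
// back to the handler/miss paths, which need frame construction. A
// shape-only model of that two-tier dispatch, with hypothetical names:
#include <string>
namespace global_load_model {
struct Cell {
  bool valid;
  double value;
};
inline double LoadGlobal(const Cell* cell, const std::string& name,
                         double (*slow_path)(const std::string&)) {
  if (cell && cell->valid) return cell->value;  // Fast path: no frame needed.
  return slow_path(name);                       // Handler/miss path.
}
}  // namespace global_load_model
// ---------------------------------------------------------------------------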
// LdaContextSlot
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableContextSlot
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
  // TODO(danno): Share the actual code object rather than creating a
  // duplicate one.
  DoLdaContextSlot(assembler);
}

// LdaCurrentContextSlot
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableCurrentContextSlot
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaImmutableCurrentContextSlot(
    InterpreterAssembler* assembler) {
  // TODO(danno): Share the actual code object rather than creating a
  // duplicate one.
  DoLdaCurrentContextSlot(assembler);
}

// StaContextSlot
//
// Stores the object in the accumulator into |slot_index| of the context at
// |depth| in the context chain starting at |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

// StaCurrentContextSlot
//
// Stores the object in the accumulator into |slot_index| of the current
// context.
void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(name_index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}
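// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter):
// GetContextAtDepth in the context-slot handlers above walks |depth| links
// up the context chain, after which the slot access is a plain indexed load.
// A linked-list model of that walk:
#include <cstddef>
#include <vector>
namespace context_model {
struct Context {
  Context* previous;
  std::vector<double> slots;
};
inline double LoadContextSlot(Context* context, size_t depth,
                              size_t slot_index) {
  while (depth-- > 0) context = context->previous;  // GetContextAtDepth.
  return context->slots[slot_index];                // LoadContextElement.
}
}  // namespace context_model
// ---------------------------------------------------------------------------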
// LdaLookupSlotInsideTypeof
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
                                         InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load of the context slot.
  {
    Node* slot_context = __ GetContextAtDepth(context, depth);
    Node* result = __ LoadContextElement(slot_context, slot_index);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupContextSlot
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupContextSlotInsideTypeof
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupContextSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
                                        InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load global.
  {
    static const int kNameOperandIndex = 0;
    static const int kSlotOperandIndex = 1;

    TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof
                                 ? INSIDE_TYPEOF
                                 : NOT_INSIDE_TYPEOF;

    BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode,
                    assembler);
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name_index = __ BytecodeOperandIdx(0);
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupGlobalSlot
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupGlobalSlotInsideTypeof
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}
void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

// LdaNamedProperty
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
                             name, smi_slot, feedback_vector);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaKeyedProperty
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the
// key in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
                             name, smi_slot, feedback_vector);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy
//
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object>
// and the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}
// StaNamedPropertyStrict
//
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object>
// and the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

// StaNamedOwnProperty
//
// Calls the StoreOwnIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}

// StaDataPropertyInLiteral
//
// Define a property <name> with value from the accumulator in <object>.
// Property attributes and whether set_function_name are stored in
// DataPropertyInLiteralFlags <flags>.
//
// This definition is not observable and is used only for definitions
// in object or class literals.
void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
  Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
  Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
  Node* value = __ GetAccumulator();
  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
  Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));

  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();

  __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
                 value, flags, feedback_vector, vector_index);
  __ Dispatch();
}
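// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): the
// module-variable handlers that follow map a signed <cell_index> into one of
// two arrays: exports use (cell_index - 1), imports use (-cell_index - 1).
// A plain-C++ model of that mapping:
#include <cstddef>
#include <utility>
namespace module_model {
// Returns {is_export, array_index} for a non-zero cell index.
inline std::pair<bool, size_t> DecodeCellIndex(long cell_index) {
  if (cell_index > 0) {
    return {true, static_cast<size_t>(cell_index - 1)};   // Regular export.
  }
  return {false, static_cast<size_t>(-cell_index - 1)};   // Regular import.
}
}  // namespace module_model
// ---------------------------------------------------------------------------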
// LdaModuleVariable
//
// Load the contents of a module variable into the accumulator. The variable
// is identified by <cell_index>. <depth> is the depth of the current context
// relative to the module context.
void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module = __ LoadContextElement(module_context,
                                       Context::EXTENSION_INDEX);

  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    Node* regular_imports =
        __ LoadObjectField(module, Module::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// StaModuleVariable
//
// Store accumulator to the module variable identified by <cell_index>.
// <depth> is the depth of the current context relative to the module context.
void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module = __ LoadContextElement(module_context,
                                       Context::EXTENSION_INDEX);

  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ StoreObjectField(cell, Cell::kValueOffset, value);
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    // Not supported (probably never).
    __ Abort(kUnsupportedModuleOperation);
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// PushContext
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}
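// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): PushContext
// and PopContext above save the old context in a register and restore it
// later, which behaves like a stack threaded through the register file. A
// direct stack model:
#include <vector>
namespace context_stack_model {
struct Machine {
  int current_context = 0;
  std::vector<int> saved;
};
inline void PushContext(Machine& m, int new_context) {
  m.saved.push_back(m.current_context);  // StoreRegister(old_context, reg).
  m.current_context = new_context;       // SetContext(accumulator).
}
inline void PopContext(Machine& m) {
  m.current_context = m.saved.back();    // SetContext(LoadRegister(reg)).
  m.saved.pop_back();
}
}  // namespace context_stack_model
// ---------------------------------------------------------------------------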
// TODO(mythria): Remove this function once all CompareOps record type
// feedback.
void Interpreter::DoCompareOp(Token::Value compare_op,
                              InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result;
  switch (compare_op) {
    case Token::IN:
      result = assembler->HasProperty(rhs, lhs, context);
      break;
    case Token::INSTANCEOF:
      result = assembler->InstanceOf(lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
                                     feedback_vector, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
                                          InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();

  // TODO(interpreter): the only reason this check is here is because we
  // sometimes emit comparisons that shouldn't collect feedback (e.g.
  // try-finally blocks and generators), and we could get rid of this by
  // introducing Smi equality tests.
  Label gather_type_feedback(assembler), do_compare(assembler);
  __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
            &gather_type_feedback);

  __ Bind(&gather_type_feedback);
  {
    Variable var_type_feedback(assembler,
                               MachineRepresentation::kTaggedSigned);
    Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
        lhs_is_not_string(assembler), gather_rhs_type(assembler),
        update_feedback(assembler);

    __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi);

    var_type_feedback.Bind(
        __ SmiConstant(CompareOperationFeedback::kSignedSmall));
    __ Goto(&gather_rhs_type);

    __ Bind(&lhs_is_not_smi);
    {
      Node* lhs_map = __ LoadMap(lhs);
      __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);

      var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
      __ Goto(&gather_rhs_type);

      __ Bind(&lhs_is_not_number);
      {
        Node* lhs_instance_type = __ LoadInstanceType(lhs);
        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
          Label lhs_is_not_oddball(assembler);
          __ GotoIfNot(
              __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
              &lhs_is_not_oddball);

          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
          __ Goto(&gather_rhs_type);

          __ Bind(&lhs_is_not_oddball);
        }

        Label lhs_is_not_string(assembler);
        __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type),
                     &lhs_is_not_string);

        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kString));
        } else {
          var_type_feedback.Bind(__ SelectSmiConstant(
              __ Word32Equal(
                  __ Word32And(lhs_instance_type,
                               __ Int32Constant(kIsNotInternalizedMask)),
                  __ Int32Constant(kInternalizedTag)),
              CompareOperationFeedback::kInternalizedString,
              CompareOperationFeedback::kString));
        }
        __ Goto(&gather_rhs_type);

        __ Bind(&lhs_is_not_string);
        if (Token::IsEqualityOp(compare_op)) {
          var_type_feedback.Bind(__ SelectSmiConstant(
              __ IsJSReceiverInstanceType(lhs_instance_type),
              CompareOperationFeedback::kReceiver,
              CompareOperationFeedback::kAny));
        } else {
          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kAny));
        }
        __ Goto(&gather_rhs_type);
      }
    }

    __ Bind(&gather_rhs_type);
    {
      Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);

      __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi);

      var_type_feedback.Bind(
          __ SmiOr(var_type_feedback.value(),
                   __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
      __ Goto(&update_feedback);

      __ Bind(&rhs_is_not_smi);
      {
        Node* rhs_map = __ LoadMap(rhs);
        __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);

        var_type_feedback.Bind(
            __ SmiOr(var_type_feedback.value(),
                     __ SmiConstant(CompareOperationFeedback::kNumber)));
        __ Goto(&update_feedback);

        __ Bind(&rhs_is_not_number);
        {
          Node* rhs_instance_type = __ LoadInstanceType(rhs);
          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
            Label rhs_is_not_oddball(assembler);
            __ GotoIfNot(__ Word32Equal(rhs_instance_type,
                                        __ Int32Constant(ODDBALL_TYPE)),
                         &rhs_is_not_oddball);

            var_type_feedback.Bind(__ SmiOr(
                var_type_feedback.value(),
                __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
            __ Goto(&update_feedback);

            __ Bind(&rhs_is_not_oddball);
          }

          Label rhs_is_not_string(assembler);
          __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type),
                       &rhs_is_not_string);

          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
            var_type_feedback.Bind(
                __ SmiOr(var_type_feedback.value(),
                         __ SmiConstant(CompareOperationFeedback::kString)));
          } else {
            var_type_feedback.Bind(__ SmiOr(
                var_type_feedback.value(),
                __ SelectSmiConstant(
                    __ Word32Equal(
                        __ Word32And(rhs_instance_type,
                                     __ Int32Constant(kIsNotInternalizedMask)),
                        __ Int32Constant(kInternalizedTag)),
                    CompareOperationFeedback::kInternalizedString,
                    CompareOperationFeedback::kString)));
          }
          __ Goto(&update_feedback);

          __ Bind(&rhs_is_not_string);
          if (Token::IsEqualityOp(compare_op)) {
            var_type_feedback.Bind(
                __ SmiOr(var_type_feedback.value(),
                         __ SelectSmiConstant(
                             __ IsJSReceiverInstanceType(rhs_instance_type),
                             CompareOperationFeedback::kReceiver,
                             CompareOperationFeedback::kAny)));
          } else {
            var_type_feedback.Bind(
                __ SmiConstant(CompareOperationFeedback::kAny));
          }
          __ Goto(&update_feedback);
        }
      }
    }

    __ Bind(&update_feedback);
    {
      __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
      __ Goto(&do_compare);
    }
  }

  __ Bind(&do_compare);
  Node* result;
  switch (compare_op) {
    case Token::EQ:
      result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
                                context);
      break;
    case Token::NE:
      result =
          assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
      break;
    case Token::EQ_STRICT:
      result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
                                      lhs, rhs, context);
      break;
    case Token::LT:
      result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
                                               lhs, rhs, context);
      break;
    case Token::GT:
      result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
                                               lhs, rhs, context);
      break;
    case Token::LTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
      break;
    case Token::GTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
}
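// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): the compare
// feedback code above distinguishes internalized strings from other strings
// with a single mask-and-compare on the instance type. The mask and tag
// values below are illustrative, not V8's actual constants.
#include <cstdint>
namespace string_feedback_model {
constexpr uint32_t kIsNotInternalizedMask = 1 << 6;  // Hypothetical value.
constexpr uint32_t kInternalizedTag = 0;             // Hypothetical value.
inline bool IsInternalized(uint32_t instance_type) {
  return (instance_type & kIsNotInternalizedMask) == kInternalizedTag;
}
}  // namespace string_feedback_model
// ---------------------------------------------------------------------------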
// Sub
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
}

// Mul
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
}

// Div
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
}

// Mod
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
}

void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
                                    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();

  Variable var_lhs_type_feedback(assembler,
                                 MachineRepresentation::kTaggedSigned),
      var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
  Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, lhs, &var_lhs_type_feedback);
  Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
      context, rhs, &var_rhs_type_feedback);
  Node* result = nullptr;

  switch (bitwise_op) {
    case Token::BIT_OR: {
      Node* value = __ Word32Or(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::BIT_AND: {
      Node* value = __ Word32And(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::BIT_XOR: {
      Node* value = __ Word32Xor(lhs_value, rhs_value);
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::SHL: {
      Node* value = __ Word32Shl(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeInt32ToTagged(value);
    } break;
    case Token::SHR: {
      Node* value = __ Word32Shr(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeUint32ToTagged(value);
    } break;
    case Token::SAR: {
      Node* value = __ Word32Sar(
          lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
      result = __ ChangeInt32ToTagged(value);
    } break;
    default:
      UNREACHABLE();
  }

  Node* result_type = __ SelectSmiConstant(
      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
      BinaryOperationFeedback::kNumber);

  if (FLAG_debug_code) {
    Label ok(assembler);
    __ GotoIf(__ TaggedIsSmi(result), &ok);
    Node* result_map = __ LoadMap(result);
    __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
                           kExpectedHeapNumber);
    __ Goto(&ok);
    __ Bind(&ok);
  }

  Node* input_feedback =
      __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
  __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector,
                    slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// BitwiseOr
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_OR, assembler);
}

// BitwiseXor
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
}

// BitwiseAnd
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBitwiseBinaryOp(Token::BIT_AND, assembler);
}
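// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the interpreter): note that
// DoBitwiseBinaryOp above retags the SHR result with ChangeUint32ToTagged
// while every other case uses ChangeInt32ToTagged. A logical shift produces
// an unsigned 32-bit value that can exceed INT32_MAX, so it must be treated
// as unsigned when re-tagging. A C++ illustration of the difference:
#include <cstdint>
namespace retag_model {
inline double AsShrResult(uint32_t bits) {
  return static_cast<double>(bits);  // Unsigned interpretation (>>>).
}
inline double AsSarResult(uint32_t bits) {
  return static_cast<double>(static_cast<int32_t>(bits));  // Signed (>>).
}
}  // namespace retag_model
// E.g. for bits == 0x80000000u, AsShrResult gives 2147483648 (a HeapNumber
// in V8), while AsSarResult gives -2147483648 (still Smi-representable on
// 64-bit targets).
// ---------------------------------------------------------------------------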
// ShiftLeft <src> // // Left shifts register <src> by the count specified in the accumulator. // Register <src> is converted to an int32 and the accumulator to uint32 // before the operation. The 5 lsb bits from the accumulator are used as count // i.e. <src> << (accumulator & 0x1F). void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SHL, assembler); } // ShiftRight <src> // // Right shifts register <src> by the count specified in the accumulator. // Result is sign extended. Register <src> is converted to an int32 and the // accumulator to uint32 before the operation. The 5 lsb bits from the accumulator // are used as count i.e. <src> >> (accumulator & 0x1F). void Interpreter::DoShiftRight(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SAR, assembler); } // ShiftRightLogical <src> // // Right shifts register <src> by the count specified in the accumulator. // Result is zero-filled. The accumulator and register <src> are converted to // uint32 before the operation. The 5 lsb bits from the accumulator are used as // count i.e. <src> >>> (accumulator & 0x1F). void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) { DoBitwiseBinaryOp(Token::SHR, assembler); } // AddSmi <imm> <reg> // // Adds an immediate value <imm> to register <reg>. For this // operation <reg> is the lhs operand and <imm> is the rhs operand. void Interpreter::DoAddSmi(InterpreterAssembler* assembler) { Variable var_result(assembler, MachineRepresentation::kTagged); Label fastpath(assembler), slowpath(assembler, Label::kDeferred), end(assembler); Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); // {right} is known to be a Smi. // Check if {left} is also a Smi and, if so, take the fast path. __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Bind(&fastpath); { // Try fast Smi addition first. Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left), __ BitcastTaggedToWord(right)); Node* overflow = __ Projection(1, pair); // Check if the Smi addition overflowed. Label if_notoverflow(assembler); __ Branch(overflow, &slowpath, &if_notoverflow); __ Bind(&if_notoverflow); { __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), feedback_vector, slot_index); var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); __ Goto(&end); } } __ Bind(&slowpath); { Node* context = __ GetContext(); AddWithFeedbackStub stub(__ isolate()); Callable callable = Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate())); var_result.Bind(__ CallStub(callable, context, left, right, __ TruncateWordToWord32(slot_index), feedback_vector)); __ Goto(&end); } __ Bind(&end); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } // SubSmi <imm> <reg> // // Subtracts an immediate value <imm> from register <reg>. For this // operation <reg> is the lhs operand and <imm> is the rhs operand. void Interpreter::DoSubSmi(InterpreterAssembler* assembler) { Variable var_result(assembler, MachineRepresentation::kTagged); Label fastpath(assembler), slowpath(assembler, Label::kDeferred), end(assembler); Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); // {right} is known to be a Smi. // Check if {left} is also a Smi and, if so, take the fast path. __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Bind(&fastpath); { // Try fast Smi subtraction first. Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left), __ BitcastTaggedToWord(right)); Node* overflow = __ Projection(1, pair); // Check if the Smi subtraction overflowed.
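// (Why the bitcast trick above is sound: a Smi stores the integer value
// shifted left by the tag size with a zero tag, so subtracting two tagged
// Smi words equals the untagged difference shifted by the same amount, and
// it overflows the machine word exactly when the untagged difference leaves
// Smi range. The overflow projection therefore flags both machine overflow
// and Smi-range overflow in a single check; the same reasoning applies to
// the IntPtrAddWithOverflow in DoAddSmi above.)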
Label if_notoverflow(assembler); __ Branch(overflow, &slowpath, &if_notoverflow); __ Bind(&if_notoverflow); { __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), feedback_vector, slot_index); var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); __ Goto(&end); } } __ Bind(&slowpath); { Node* context = __ GetContext(); SubtractWithFeedbackStub stub(__ isolate()); Callable callable = Callable( stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate())); var_result.Bind(__ CallStub(callable, context, left, right, __ TruncateWordToWord32(slot_index), feedback_vector)); __ Goto(&end); } __ Bind(&end); { __ SetAccumulator(var_result.value()); __ Dispatch(); } } // BitwiseOrSmi <imm> <reg> // // BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs // operand and <imm> is the rhs operand. void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32Or(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // BitwiseAndSmi <imm> <reg> // // BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs // operand and <imm> is the rhs operand. void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* value = __ Word32And(lhs_value, rhs_value); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); }
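// The result_type selection in the two handlers above relies on
// ChangeInt32ToTagged returning a Smi when the 32-bit value fits and a
// freshly allocated HeapNumber otherwise, so a single TaggedIsSmi test on
// the result is enough to classify it. In pseudo-form (names from the
// handlers above):
//
//   result_type = TaggedIsSmi(result) ? kSignedSmall : kNumber;
//   UpdateFeedback(result_type | lhs_feedback, feedback_vector, slot_index);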
// ShiftLeftSmi <imm> <reg> // // Left shifts register <src> by the count specified in <imm>. // Register <src> is converted to an int32 before the operation. The 5 // lsb bits from <imm> are used as count i.e. <src> << (<imm> & 0x1F). void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f)); Node* value = __ Word32Shl(lhs_value, shift_count); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // ShiftRightSmi <imm> <reg> // // Right shifts register <src> by the count specified in <imm>. // Register <src> is converted to an int32 before the operation. The 5 // lsb bits from <imm> are used as count i.e. <src> >> (<imm> & 0x1F). void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(1); Node* left = __ LoadRegister(reg_index); Node* right = __ BytecodeOperandImmSmi(0); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(2); Node* feedback_vector = __ LoadFeedbackVector(); Variable var_lhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( context, left, &var_lhs_type_feedback); Node* rhs_value = __ SmiToWord32(right); Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f)); Node* value = __ Word32Sar(lhs_value, shift_count); Node* result = __ ChangeInt32ToTagged(value); Node* result_type = __ SelectSmiConstant( __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, BinaryOperationFeedback::kNumber); __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } Node* Interpreter::BuildUnaryOp(Callable callable, InterpreterAssembler* assembler) { Node* target = __ HeapConstant(callable.code()); Node* accumulator = __ GetAccumulator(); Node* context = __ GetContext(); return __ CallStub(callable.descriptor(), target, context, accumulator); } template <class Generator> void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); Node* result = Generator::Generate(assembler, value, context, feedback_vector, slot_index); __ SetAccumulator(result); __ Dispatch(); } // ToName // // Convert the object referenced by the accumulator to a name. void Interpreter::DoToName(InterpreterAssembler* assembler) { Node* object = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ ToName(context, object); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // ToNumber // // Convert the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) { Node* object = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ ToNumber(context, object); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // ToObject // // Convert the object referenced by the accumulator to a JSReceiver. void Interpreter::DoToObject(InterpreterAssembler* assembler) { Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler); __ StoreRegister(result, __ BytecodeOperandReg(0)); __ Dispatch(); } // Inc // // Increments value in the accumulator by one. void Interpreter::DoInc(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point increment. Label do_finc(assembler), end(assembler); Variable var_finc_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); value_var.Bind(value); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi addition first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrAddWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi addition overflowed. Label if_overflow(assembler), if_notoverflow(assembler); assembler->Branch(overflow, &if_overflow, &if_notoverflow); assembler->Bind(&if_notoverflow); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); result_var.Bind( assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); assembler->Goto(&end); assembler->Bind(&if_overflow); { var_finc_value.Bind(assembler->SmiToFloat64(value)); assembler->Goto(&do_finc); } } assembler->Bind(&if_isnotsmi); { // Check if the value is a HeapNumber. Label if_valueisnumber(assembler), if_valuenotnumber(assembler, Label::kDeferred); Node* value_map = assembler->LoadMap(value); assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); assembler->Bind(&if_valueisnumber); { // Load the HeapNumber value. var_finc_value.Bind(assembler->LoadHeapNumberValue(value)); assembler->Goto(&do_finc); } assembler->Bind(&if_valuenotnumber); { // We do not require an Or with earlier feedback here because once we // convert the value to a number, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. 
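// (Worked example of the retry loop: for Inc applied to undefined, the
// first pass lands here with the feedback still kNone. The oddball branch
// below rebinds value_var to the oddball's cached ToNumber value, NaN for
// undefined, and the feedback to kNumberOrOddball, then jumps back to
// &start, where the HeapNumber branch completes via the float increment
// path.)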
CSA_ASSERT(assembler, assembler->SmiEqual( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNone))); Label if_valueisoddball(assembler), if_valuenotoddball(assembler); Node* instance_type = assembler->LoadMapInstanceType(value_map); Node* is_oddball = assembler->Word32Equal( instance_type, assembler->Int32Constant(ODDBALL_TYPE)); assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); assembler->Bind(&if_valueisoddball); { // Convert Oddball to Number and check again. value_var.Bind( assembler->LoadObjectField(value, Oddball::kToNumberOffset)); var_type_feedback.Bind(assembler->SmiConstant( BinaryOperationFeedback::kNumberOrOddball)); assembler->Goto(&start); } assembler->Bind(&if_valuenotoddball); { // Convert to a Number first and try again. Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate()); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kAny)); value_var.Bind(assembler->CallStub(callable, context, value)); assembler->Goto(&start); } } } } assembler->Bind(&do_finc); { Node* finc_value = var_finc_value.value(); Node* one = assembler->Float64Constant(1.0); Node* finc_result = assembler->Float64Add(finc_value, one); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNumber))); result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result)); assembler->Goto(&end); } assembler->Bind(&end); assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ SetAccumulator(result_var.value()); __ Dispatch(); } // Dec // // Decrements value in the accumulator by one. void Interpreter::DoDec(InterpreterAssembler* assembler) { typedef CodeStubAssembler::Label Label; typedef compiler::Node Node; typedef CodeStubAssembler::Variable Variable; Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* slot_index = __ BytecodeOperandIdx(0); Node* feedback_vector = __ LoadFeedbackVector(); // Shared entry for floating point decrement. Label do_fdec(assembler), end(assembler); Variable var_fdec_value(assembler, MachineRepresentation::kFloat64); // We might need to try again due to ToNumber conversion. Variable value_var(assembler, MachineRepresentation::kTagged); Variable result_var(assembler, MachineRepresentation::kTagged); Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); Variable* loop_vars[] = {&value_var, &var_type_feedback}; Label start(assembler, 2, loop_vars); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kNone)); value_var.Bind(value); assembler->Goto(&start); assembler->Bind(&start); { value = value_var.value(); Label if_issmi(assembler), if_isnotsmi(assembler); assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); assembler->Bind(&if_issmi); { // Try fast Smi subtraction first. Node* one = assembler->SmiConstant(Smi::FromInt(1)); Node* pair = assembler->IntPtrSubWithOverflow( assembler->BitcastTaggedToWord(value), assembler->BitcastTaggedToWord(one)); Node* overflow = assembler->Projection(1, pair); // Check if the Smi subtraction overflowed. 
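// (Same overflow technique as in DoInc above: subtracting the bitcast
// tagged words performs the untagged subtraction and flags Smi-range
// overflow through the overflow projection in one step, e.g. decrementing
// Smi::kMinValue overflows and falls through to the float path below.)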
Label if_overflow(assembler), if_notoverflow(assembler); assembler->Branch(overflow, &if_overflow, &if_notoverflow); assembler->Bind(&if_notoverflow); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); result_var.Bind( assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); assembler->Goto(&end); assembler->Bind(&if_overflow); { var_fdec_value.Bind(assembler->SmiToFloat64(value)); assembler->Goto(&do_fdec); } } assembler->Bind(&if_isnotsmi); { // Check if the value is a HeapNumber. Label if_valueisnumber(assembler), if_valuenotnumber(assembler, Label::kDeferred); Node* value_map = assembler->LoadMap(value); assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); assembler->Bind(&if_valueisnumber); { // Load the HeapNumber value. var_fdec_value.Bind(assembler->LoadHeapNumberValue(value)); assembler->Goto(&do_fdec); } assembler->Bind(&if_valuenotnumber); { // We do not require an Or with earlier feedback here because once we // convert the value to a number, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. CSA_ASSERT(assembler, assembler->SmiEqual( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNone))); Label if_valueisoddball(assembler), if_valuenotoddball(assembler); Node* instance_type = assembler->LoadMapInstanceType(value_map); Node* is_oddball = assembler->Word32Equal( instance_type, assembler->Int32Constant(ODDBALL_TYPE)); assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); assembler->Bind(&if_valueisoddball); { // Convert Oddball to Number and check again. value_var.Bind( assembler->LoadObjectField(value, Oddball::kToNumberOffset)); var_type_feedback.Bind(assembler->SmiConstant( BinaryOperationFeedback::kNumberOrOddball)); assembler->Goto(&start); } assembler->Bind(&if_valuenotoddball); { // Convert to a Number first and try again. Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate()); var_type_feedback.Bind( assembler->SmiConstant(BinaryOperationFeedback::kAny)); value_var.Bind(assembler->CallStub(callable, context, value)); assembler->Goto(&start); } } } } assembler->Bind(&do_fdec); { Node* fdec_value = var_fdec_value.value(); Node* one = assembler->Float64Constant(1.0); Node* fdec_result = assembler->Float64Sub(fdec_value, one); var_type_feedback.Bind(assembler->SmiOr( var_type_feedback.value(), assembler->SmiConstant(BinaryOperationFeedback::kNumber))); result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result)); assembler->Goto(&end); } assembler->Bind(&end); assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); __ SetAccumulator(result_var.value()); __ Dispatch(); }
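// DoInc and DoDec above are deliberately symmetric: only the word operation
// (IntPtrAddWithOverflow vs. IntPtrSubWithOverflow) and the float operation
// (Float64Add vs. Float64Sub) differ, so both produce the same feedback
// transitions:
//
//   Smi fast path               -> kSignedSmall
//   Smi overflow or HeapNumber  -> kNumber
//   oddball, then retry         -> kNumberOrOddball
//   anything else, then retry   -> kAny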
// ToBooleanLogicalNot // // Perform logical-not on the accumulator, first casting the // accumulator to a boolean value if required. void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Variable result(assembler, MachineRepresentation::kTagged); Label if_true(assembler), if_false(assembler), end(assembler); Node* true_value = __ BooleanConstant(true); Node* false_value = __ BooleanConstant(false); __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); __ Bind(&if_true); { result.Bind(false_value); __ Goto(&end); } __ Bind(&if_false); { result.Bind(true_value); __ Goto(&end); } __ Bind(&end); __ SetAccumulator(result.value()); __ Dispatch(); } // LogicalNot // // Perform logical-not on the accumulator, which must already be a boolean // value. void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Variable result(assembler, MachineRepresentation::kTagged); Label if_true(assembler), if_false(assembler), end(assembler); Node* true_value = __ BooleanConstant(true); Node* false_value = __ BooleanConstant(false); __ Branch(__ WordEqual(value, true_value), &if_true, &if_false); __ Bind(&if_true); { result.Bind(false_value); __ Goto(&end); } __ Bind(&if_false); { if (FLAG_debug_code) { __ AbortIfWordNotEqual(value, false_value, BailoutReason::kExpectedBooleanValue); } result.Bind(true_value); __ Goto(&end); } __ Bind(&end); __ SetAccumulator(result.value()); __ Dispatch(); } // TypeOf // // Load the accumulator with the string representing the type of the // object in the accumulator. void Interpreter::DoTypeOf(InterpreterAssembler* assembler) { Node* value = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = assembler->Typeof(value, context); __ SetAccumulator(result); __ Dispatch(); } void Interpreter::DoDelete(Runtime::FunctionId function_id, InterpreterAssembler* assembler) { Node* reg_index = __ BytecodeOperandReg(0); Node* object = __ LoadRegister(reg_index); Node* key = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ CallRuntime(function_id, context, object, key); __ SetAccumulator(result); __ Dispatch(); } // DeletePropertyStrict // // Delete the property specified in the accumulator from the object // referenced by the register operand following strict mode semantics. void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) { DoDelete(Runtime::kDeleteProperty_Strict, assembler); } // DeletePropertySloppy // // Delete the property specified in the accumulator from the object // referenced by the register operand following sloppy mode semantics. void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) { DoDelete(Runtime::kDeleteProperty_Sloppy, assembler); }
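// The two delete handlers above differ only in the runtime entry they pass
// to the shared DoDelete helper, mirroring the language semantics: deleting
// a non-configurable property throws a TypeError in strict mode but merely
// evaluates to false in sloppy mode. In both cases the object comes from
// the register operand and the key from the accumulator.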
// GetSuperConstructor // // Get the super constructor from the object referenced by the accumulator. // The result is stored in register |reg|. void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) { Node* active_function = __ GetAccumulator(); Node* context = __ GetContext(); Node* result = __ GetSuperConstructor(active_function, context); Node* reg = __ BytecodeOperandReg(0); __ StoreRegister(result, reg); __ Dispatch(); } void Interpreter::DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode) { Node* function_reg = __ BytecodeOperandReg(0); Node* function = __ LoadRegister(function_reg); Node* receiver_reg = __ BytecodeOperandReg(1); Node* receiver_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); Node* slot_id = __ BytecodeOperandIdx(3); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ CallJSWithFeedback(function, context, receiver_arg, args_count, slot_id, feedback_vector, tail_call_mode); __ SetAccumulator(result); __ Dispatch(); } // Call // // Call a JSFunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback // into |feedback_slot_id|. void Interpreter::DoCall(InterpreterAssembler* assembler) { DoJSCall(assembler, TailCallMode::kDisallow); } // CallProperty // // Call a JSFunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback into // |feedback_slot_id|. The callable is known to be a property of the receiver. void Interpreter::DoCallProperty(InterpreterAssembler* assembler) { // TODO(leszeks): Look into making the interpreter use the fact that the // receiver is non-null. DoJSCall(assembler, TailCallMode::kDisallow); } // TailCall // // Tail call a JSFunction or Callable in |callable| with the |receiver| and // |arg_count| arguments in subsequent registers. Collect type feedback // into |feedback_slot_id|. void Interpreter::DoTailCall(InterpreterAssembler* assembler) { DoJSCall(assembler, TailCallMode::kAllow); } // CallRuntime // // Call the runtime function |function_id| with the first argument in // register |first_arg| and |arg_count| arguments in subsequent // registers. void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) { Node* function_id = __ BytecodeOperandRuntimeId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count); __ SetAccumulator(result); __ Dispatch(); } // InvokeIntrinsic // // Implements the semantic equivalent of calling the runtime function // |function_id| with the first argument in |first_arg| and |arg_count| // arguments in subsequent registers. void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) { Node* function_id = __ BytecodeOperandIntrinsicId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* arg_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); IntrinsicsHelper helper(assembler); Node* result = helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count); __ SetAccumulator(result); __ Dispatch(); }
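// The call-style bytecodes handled above share an operand layout: operand 0
// names the callee (or a runtime/intrinsic id), operand 1 the first argument
// register, and operand 2 the register count. For DoJSCall the count
// includes the implicit receiver, which is why the handler computes, in
// effect:
//
//   args_count = BytecodeOperandCount(2) - 1;  // strip the receiver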
// CallRuntimeForPair // // Call the runtime function |function_id| which returns a pair, with the // first argument in register |first_arg| and |arg_count| arguments in // subsequent registers. Returns the result in <first_return> and // <first_return + 1>. void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) { // Call the runtime function. Node* function_id = __ BytecodeOperandRuntimeId(0); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result_pair = __ CallRuntimeN(function_id, context, first_arg, args_count, 2); // Store the results in <first_return> and <first_return + 1>. Node* first_return_reg = __ BytecodeOperandReg(3); Node* second_return_reg = __ NextRegister(first_return_reg); Node* result0 = __ Projection(0, result_pair); Node* result1 = __ Projection(1, result_pair); __ StoreRegister(result0, first_return_reg); __ StoreRegister(result1, second_return_reg); __ Dispatch(); } // CallJSRuntime // // Call the JS runtime function that has the |context_index| with the receiver // in register |receiver| and |arg_count| arguments in subsequent registers. void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) { Node* context_index = __ BytecodeOperandIdx(0); Node* receiver_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); // Get the function to call from the native context. Node* context = __ GetContext(); Node* native_context = __ LoadNativeContext(context); Node* function = __ LoadContextElement(native_context, context_index); // Call the function. Node* result = __ CallJS(function, context, first_arg, args_count, TailCallMode::kDisallow); __ SetAccumulator(result); __ Dispatch(); } // CallWithSpread // // Call a JSFunction or Callable in |callable| with the receiver in // |first_arg| and |arg_count - 1| arguments in subsequent registers. The // final argument is always a spread. // void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) { Node* callable_reg = __ BytecodeOperandReg(0); Node* callable = __ LoadRegister(callable_reg); Node* receiver_reg = __ BytecodeOperandReg(1); Node* receiver_arg = __ RegisterLocation(receiver_reg); Node* receiver_args_count = __ BytecodeOperandCount(2); Node* receiver_count = __ Int32Constant(1); Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); Node* context = __ GetContext(); // Call into the runtime function CallWithSpread, which does all the work. Node* result = __ CallJSWithSpread(callable, context, receiver_arg, args_count); __ SetAccumulator(result); __ Dispatch(); } // ConstructWithSpread // // Call the constructor in |constructor| with the first argument in register // |first_arg| and |arg_count| arguments in subsequent registers. The final // argument is always a spread. The new.target is in the accumulator. // void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) { Node* new_target = __ GetAccumulator(); Node* constructor_reg = __ BytecodeOperandReg(0); Node* constructor = __ LoadRegister(constructor_reg); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* context = __ GetContext(); Node* result = __ ConstructWithSpread(constructor, context, new_target, first_arg, args_count); __ SetAccumulator(result); __ Dispatch(); }
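// ConstructWithSpread above and Construct below both read new.target from
// the accumulator rather than from an explicit operand; the bytecode
// generator arranges for it to be there immediately before emitting the
// bytecode, which keeps the operand list down to the constructor, the first
// argument register and the argument count (plus a feedback slot for
// Construct).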
// Construct // // Call operator construct with |constructor| and the first argument in // register |first_arg| and |arg_count| arguments in subsequent // registers. The new.target is in the accumulator. // void Interpreter::DoConstruct(InterpreterAssembler* assembler) { Node* new_target = __ GetAccumulator(); Node* constructor_reg = __ BytecodeOperandReg(0); Node* constructor = __ LoadRegister(constructor_reg); Node* first_arg_reg = __ BytecodeOperandReg(1); Node* first_arg = __ RegisterLocation(first_arg_reg); Node* args_count = __ BytecodeOperandCount(2); Node* slot_id = __ BytecodeOperandIdx(3); Node* feedback_vector = __ LoadFeedbackVector(); Node* context = __ GetContext(); Node* result = __ Construct(constructor, context, new_target, first_arg, args_count, slot_id, feedback_vector); __ SetAccumulator(result); __ Dispatch(); } // TestEqual <src> // // Test if the value in the <src> register equals the accumulator. void Interpreter::DoTestEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::EQ, assembler); } // TestNotEqual <src> // // Test if the value in the <src> register is not equal to the accumulator. void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::NE, assembler); } // TestEqualStrict <src> // // Test if the value in the <src> register is strictly equal to the // accumulator. void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler); } // TestLessThan <src> // // Test if the value in the <src> register is less than the accumulator. void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::LT, assembler); } // TestGreaterThan <src> // // Test if the value in the <src> register is greater than the accumulator. void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::GT, assembler); } // TestLessThanOrEqual <src> // // Test if the value in the <src> register is less than or equal to the // accumulator. void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) { DoCompareOpWithFeedback(Token::Value::LTE, assembler); } // TestGreaterThanOrEqual <src> // // Test if the value in the <src> register is greater than or equal to the // accumulator.