// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_MIPS
#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register receiver,
Register name,
// Number of the cache entry, not scaled.
Register offset,
Register scratch,
Register scratch2,
Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
ASSERT(map_off_addr > key_off_addr);
ASSERT((map_off_addr - key_off_addr) % 4 == 0);
ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
Register base_addr = scratch;
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ sll(offset_scratch, offset, 1);
__ Addu(offset_scratch, offset_scratch, offset);
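// (A worked example, for illustration: an unscaled entry number of 5 becomes
// (5 << 1) + 5 == 15, i.e. 15 pointers from the table base once scaled by
// kPointerSizeLog2 below.)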
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
__ sll(at, offset_scratch, kPointerSizeLog2);
__ Addu(base_addr, base_addr, at);
// Check that the key in the entry matches the name.
__ lw(at, MemOperand(base_addr, 0));
__ Branch(&miss, ne, name, Operand(at));
// Check the map matches.
__ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
__ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Branch(&miss, ne, at, Operand(scratch2));
// Get the code entry from the cache.
Register code = scratch2;
scratch2 = no_reg;
__ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
__ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ Branch(&miss, ne, flags_reg, Operand(flags));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
// Miss: fall through.
__ bind(&miss);
}
void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
Handle<Name> name,
Register scratch0,
Register scratch1) {
ASSERT(name->IsUniqueName());
ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
Label done;
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
__ Branch(miss_label, ne, scratch0, Operand(zero_reg));
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
// Load properties array.
Register properties = scratch0;
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
__ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
__ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(masm,
miss_label,
&done,
receiver,
properties,
name,
scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch,
Register extra,
Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
ASSERT(sizeof(Entry) == 12);
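// (Each Entry holds three 32-bit words: key, value and map; 3 * 4 == 12.)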
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
ASSERT(!extra.is(receiver));
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
ASSERT(!extra2.is(receiver));
ASSERT(!extra2.is(name));
ASSERT(!extra2.is(scratch));
ASSERT(!extra2.is(extra));
// Check register validity.
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
uint32_t mask = kPrimaryTableSize - 1;
// We shift out the last two bits because they are not part of the hash and
// they are always 01 for maps.
__ srl(scratch, scratch, kHeapObjectTagSize);
__ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
__ And(scratch, scratch, Operand(mask));
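// scratch now holds the primary table index; expressed in C++, roughly:
//   index = (((name_hash + map) >> kHeapObjectTagSize) ^
//            (flags >> kHeapObjectTagSize)) & mask;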
// Probe the primary table.
ProbeTable(isolate,
masm,
flags,
kPrimary,
receiver,
name,
scratch,
extra,
extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ srl(at, name, kHeapObjectTagSize);
__ Subu(scratch, scratch, at);
uint32_t mask2 = kSecondaryTableSize - 1;
__ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
__ And(scratch, scratch, Operand(mask2));
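// scratch now holds the secondary table index; expressed in C++, roughly:
//   index2 = (index - (name >> kHeapObjectTagSize) +
//             ((flags >> kHeapObjectTagSize) & mask2)) & mask2;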
// Probe the secondary table.
ProbeTable(isolate,
masm,
flags,
kSecondary,
receiver,
name,
scratch,
extra,
extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
extra2, extra3);
}
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
__ lw(prototype,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
__ lw(prototype,
FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
__ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ lw(prototype,
FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
__ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm,
int index,
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ lw(prototype,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ASSERT(!prototype.is(at));
__ li(at, isolate->global_object());
__ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
__ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
bool inobject,
int index,
Representation representation) {
ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
offset = offset + FixedArray::kHeaderSize;
__ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
src = dst;
}
__ lw(dst, FieldMemOperand(src, offset));
}
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ GetObjectType(receiver, scratch, scratch);
__ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
// Load length directly from the JS array.
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// Generate code to check if an object is a string. If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* smi,
Label* non_string_object) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, smi, t0);
// Check that the object is a string.
__ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ And(scratch2, scratch1, Operand(kIsNotStringMask));
// The cast is needed to resolve the Operand overload when kStringTag is 0x0.
__ Branch(non_string_object,
ne,
scratch2,
Operand(static_cast<int32_t>(kStringTag)));
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object, execution
// continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
// Unwrap the value and check if the wrapped value is a string.
__ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, scratch1);
}
void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<JSGlobalObject> global,
Handle<Name> name,
Register scratch,
Label* miss) {
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ li(scratch, Operand(cell));
__ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
Handle<Name> name,
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
}
}
// Generate StoreTransition code; the value is passed in the a0 register.
// After executing the generated code, the receiver_reg and name_reg
// may be clobbered.
void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
Register storage_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* slow) {
// a0 : value.
Label exit;
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
__ mtc1(scratch1, f6);
__ cvt_d_w(f4, f6);
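// (mtc1 moves the untagged 32-bit integer into the FPU; cvt_d_w then
// converts it to a double in f4 for the store below.)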
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
if (details.type() == FIELD &&
object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
__ li(a2, Operand(transition));
__ Push(a2, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
3, 1);
return;
}
// Update the map of the object.
__ li(scratch1, Operand(transition));
__ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
scratch2,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
if (details.type() == CONSTANT) {
ASSERT(value_reg.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
return;
}
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties are not going to change.
index -= object->map()->inobject_properties();
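// After this adjustment a negative index denotes an in-object property,
// while a non-negative index is a slot in the external properties array.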
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
storage_reg,
scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
storage_reg,
receiver_reg,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ bind(&exit);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
// Generate StoreField code; the value is passed in the a0 register.
// When leaving the generated code after success, the receiver_reg and
// name_reg may be clobbered. Upon branch to miss_label, the receiver and
// name registers have their original values.
void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Label* miss_label) {
// a0 : value
Label exit;
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
int index = lookup->GetFieldIndex().field_index();
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties are not going to change.
index -= object->map()->inobject_properties();
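// After this adjustment a negative index denotes an in-object property,
// while a non-negative index is a slot in the external properties array.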
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
__ lw(scratch1, FieldMemOperand(receiver_reg, offset));
} else {
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ lw(scratch1, FieldMemOperand(scratch1, offset));
}
// Store the value into the storage.
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch2, value_reg);
__ mtc1(scratch2, f6);
__ cvt_d_w(f4, f6);
__ jmp(&do_store);
__ bind(&heap_number);
__ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
__ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
__ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
return;
}
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
}
// Return the value (register v0).
ASSERT(value_reg.is(a0));
__ bind(&exit);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ li(this->name(), Operand(name));
}
}
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
}
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
Handle<JSObject> holder_obj,
IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
ExternalReference(IC_Utility(id), masm->isolate()),
StubCache::kInterceptorArgsLength);
}
static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
// Reserves space for the extra arguments to the API function in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
Register scratch) {
ASSERT(Smi::FromInt(0) == 0);
for (int i = 0; i < kFastApiCallArguments; i++) {
__ push(zero_reg);
}
}
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
__ Drop(kFastApiCallArguments);
}
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc,
bool restore_context) {
// ----------- S t a t e -------------
// -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
// : holder (set by CheckPrototypes)
// -- sp[28] : last JS argument
// -- ...
// -- sp[(argc + 6) * 4] : first JS argument
// -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
typedef FunctionCallbackArguments FCA;
// Save calling context.
__ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ li(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
__ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
// Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
} else {
__ li(t2, call_data);
}
// Store call data.
__ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
// Store isolate.
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
__ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
// Store ReturnValue default and ReturnValue.
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
__ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
__ Move(a2, sp);
// Allocate the FunctionCallbackInfo structure in the arguments' space, since
// it's not controlled by GC.
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// a0 = FunctionCallbackInfo&
// FunctionCallbackInfo is built at sp + kPointerSize (sp itself is a
// reserved slot for ra).
__ Addu(a0, sp, kPointerSize);
// FunctionCallbackInfo::implicit_args_
__ sw(a2, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ sw(t0, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ li(t0, Operand(argc));
__ sw(t0, MemOperand(a0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call = 0
__ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref =
ExternalReference(&fun,
type,
masm->isolate());
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
MemOperand return_value_operand(
fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
a1,
kStackUnwindSpace,
return_value_operand,
restore_context ?
&context_restore_operand : NULL);
}
// Generate call to api function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Register receiver,
Register scratch,
int argc,
Register* values) {
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
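// The stores below lay the frame out as follows (a sketch; slot order within
// the FunctionCallbackInfo part is defined by FunctionCallbackArguments):
//   sp[0 .. kFastApiCallArguments - 1] : FCA slots (holder stored below)
//   sp[kFastApiCallArguments ...]      : the argc argument values
//   sp[stack_space - 1]                : receiver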
// Assign stack space for the call arguments.
__ Subu(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
__ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ sw(receiver, MemOperand(sp, index * kPointerSize));
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
ASSERT(!receiver.is(values[i]));
ASSERT(!scratch.is(values[i]));
__ sw(values[i], MemOperand(sp, index-- * kPointerSize));
}
GenerateFastApiDirectCall(masm, optimization, argc, true);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name),
extra_ic_state_(extra_ic_state) {}
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
if (optimization.is_constant_call()) {
CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
holder, lookup, name, optimization, miss);
} else {
CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
name, holder, miss);
}
}
private:
void CompileCacheable(MacroAssembler* masm,
Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name,
const CallOptimization& optimization,
Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
Counters* counters = masm->isolate()->counters();
int depth1 = kInvalidProtoDepth;
int depth2 = kInvalidProtoDepth;
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
depth1 = optimization.GetPrototypeDepthOfExpectedType(
object, interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
depth2 = optimization.GetPrototypeDepthOfExpectedType(
interceptor_holder, Handle<JSObject>(lookup->holder()));
}
can_do_fast_api_call =
depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
}
__ IncrementCounter(counters->call_const_interceptor(), 1,
scratch1, scratch2);
if (can_do_fast_api_call) {
__ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
scratch1, scratch2);
ReserveSpaceForFastApiCall(masm, scratch1);
}
// Check that the maps from receiver to interceptor's holder
// haven't changed and thus we can invoke interceptor.
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(
IC::CurrentTypeOf(object, masm->isolate()), receiver,
interceptor_holder, scratch1, scratch2, scratch3,
name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
Label regular_invoke;
LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
&regular_invoke);
// Interceptor returned nothing for this property. Try to use cached
// constant function.
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(
IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
handle(lookup->holder()), scratch1, scratch2, scratch3,
name, depth2, miss);
} else {
// CheckPrototypes has the side effect of fetching a 'holder' for the
// API call (the object which is instanceof for the signature). It is
// safe to omit it here: if present, it was already fetched by the
// previous CheckPrototypes.
ASSERT(depth2 == kInvalidProtoDepth);
}
// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiDirectCall(
masm, optimization, arguments_.immediate(), false);
} else {
Handle<JSFunction> function = optimization.constant_function();
__ Move(a0, receiver);
stub_compiler_->GenerateJumpFunction(object, function);
}
// Deferred code for the fast API call case: clean up preallocated space.
if (can_do_fast_api_call) {
__ bind(&miss_cleanup);
FreeSpaceForFastApiCall(masm);
__ Branch(miss_label);
}
// Invoke a regular function.
__ bind(&regular_invoke);
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
}
void CompileRegular(MacroAssembler* masm,
Handle<JSObject> object,
Register receiver,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<Name> name,
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(
IC::CurrentTypeOf(object, masm->isolate()), receiver,
interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
CompileCallLoadPropertyWithInterceptor(
masm, receiver, holder, name_, interceptor_holder,
IC::kLoadPropertyWithInterceptorForCall);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
Handle<JSObject> holder_obj,
Register scratch,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(receiver, holder, name_);
CompileCallLoadPropertyWithInterceptor(
masm, receiver, holder, name_, holder_obj,
IC::kLoadPropertyWithInterceptorOnly);
__ pop(name_);
__ pop(holder);
__ pop(receiver);
}
// If the interceptor returned the no-result sentinel, fall through so the
// caller can invoke the cached constant function; otherwise branch to
// |interceptor_succeeded|.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
}
CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
ExtraICState extra_ic_state_;
};
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
#undef __
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ li(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
&& !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
__ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
Handle<JSObject> current = Handle<JSObject>::null();
if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
ASSERT(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->is_dictionary_map() &&
!current_map->IsJSGlobalObjectMap() &&
!current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
__ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
if (depth != 1 || check == CHECK_ALL_MAPS) {
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(
masm(), Handle<JSGlobalObject>::cast(current), name,
scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
__ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ li(reg, Operand(prototype));
}
}
if (save_at_depth == depth) {
__ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
}
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
__ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
ASSERT(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// Return the register containing the holder.
return reg;
}
void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ Branch(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ Branch(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
ASSERT(!reg.is(scratch3()));
ASSERT(!reg.is(scratch4()));
// Load the properties dictionary.
Register dictionary = scratch4();
__ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
&miss,
&probe_done,
dictionary,
this->name(),
scratch2(),
scratch3());
__ bind(&probe_done);
// If probing finds an entry in the dictionary, scratch3 contains the
// pointer into the dictionary. Check that the value is the callback.
Register pointer = scratch3();
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
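// (Dictionary entries are (key, value, details) triples; the value slot sits
// one pointer past the key slot, which is why kValueOffset adds kPointerSize
// to kElementsStartOffset.)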
__ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
__ Branch(&miss, ne, scratch2(), Operand(callback));
}
HandlerFrontendFooter(name, &miss);
return reg;
}
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
} else {
KeyedLoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
representation);
GenerateTailCall(masm(), stub.GetCode(isolate()));
}
}
void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ li(v0, value);
__ Ret();
}
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build the PropertyCallbackArguments::args_ list on the stack, with the
// property name pushed below the exit frame, and store pointers to them so
// the GC is aware of them.
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
if (heap()->InNewSpace(callback->data())) {
__ li(scratch3(), callback);
__ lw(scratch3(), FieldMemOperand(scratch3(),
ExecutableAccessorInfo::kDataOffset));
} else {
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Subu(sp, sp, 6 * kPointerSize);
__ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
__ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
__ li(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
__ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
__ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
__ Addu(scratch2(), sp, 1 * kPointerSize);
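// Stack layout at this point (sp[0] lowest): name, holder, isolate,
// ReturnValue default, ReturnValue, data, receiver. scratch2 points at the
// args_ array, which starts at the holder slot.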
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
__ mov(a0, sp); // (first argument - a0) = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
// (second argument - a1) = AccessorInfo&
__ Addu(a1, sp, kPointerSize);
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
__ CallApiFunctionAndReturn(ref,
getter_address,
thunk_ref,
a2,
kStackUnwindSpace,
MemOperand(fp, 6 * kPointerSize),
NULL);
}
void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// So far the most popular follow-ups for interceptor loads are FIELD and
// CALLBACKS, so we inline only them; other cases may be added later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
ExecutableAccessorInfo* callback =
ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
}
if (compile_followup_inline) {
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code;
// the FIELD case might cause a miss during the prototype check.
bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perform_prototype_check);
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
__ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), interceptor_holder,
IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
__ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
__ pop(receiver());
}
// Leave the internal frame.
}
GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
PushInterceptorArguments(masm(), receiver(), holder_reg,
this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ Branch(miss, ne, a2, Operand(name));
}
}
void CallStubCompiler::GenerateFunctionCheck(Register function,
Register scratch,
Label* miss) {
__ JumpIfSmi(function, miss);
__ GetObjectType(function, scratch, scratch);
__ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
}
void CallStubCompiler::GenerateLoadFunctionFromCell(
Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss) {
// Get the value from the cell.
__ li(a3, Operand(cell));
__ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
GenerateFunctionCheck(a1, a3, miss);
// Check the shared function info. Make sure it hasn't changed.
__ li(a3, Handle<SharedFunctionInfo>(function->shared()));
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Branch(miss, ne, t0, Operand(a3));
} else {
__ Branch(miss, ne, a1, Operand(function));
}
}
void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
extra_state());
__ Jump(code, RelocInfo::CODE_TARGET);
}
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
Label miss;
Register reg = HandlerFrontendHeader(
object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
GenerateJumpFunction(object, a1, &miss);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::FAST, name);
}
Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
const int argc = arguments().immediate();
__ li(a0, Operand(argc));
__ li(a2, Operand(site_feedback_cell));
__ li(a1, Operand(function));
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
Register receiver = a0;
Register scratch = a1;
const int argc = arguments().immediate();
if (argc == 0) {
// Nothing to do, just return the length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ DropAndRet(argc + 1);
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = t2;
Register end_elements = t1;
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into scratch and calculate new length.
__ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
// Check if value is a smi.
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
// Save new length.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address of the array end below,
// so compute that address in end_elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ Addu(end_elements, end_elements, kEndElementsOffset);
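// end_elements now points one past the old last element, i.e. at the slot
// the pushed value occupies.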
__ sw(t0, MemOperand(end_elements));
// Return the new length.
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into scratch and calculate new length.
__ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ Branch(&call_builtin, gt, scratch, Operand(t0));
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
t0, scratch, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
// Save new length.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&with_write_barrier);
__ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(a3, t3, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
__ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&call_builtin, eq, t3, Operand(at));
// a0: receiver
// a3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a3,
t3,
&try_holey_map);
__ mov(a2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
a3,
t3,
&call_builtin);
__ mov(a2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
}
// Save new length.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address of the array end below,
// so compute that address in end_elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, kEndElementsOffset);
__ sw(t0, MemOperand(end_elements));
__ RecordWrite(elements,
end_elements,
t0,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&attempt_to_grow_elements);
// scratch: array's length + 1.
// t0: elements' length.
if (!FLAG_inline_new) {
__ Branch(&call_builtin);
}
__ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(a2, &no_fast_elements_check);
__ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(t3, t3, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
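// Growing by kAllocationDelta slots: the pushed value fills the first new
// slot and the remaining slots are filled with holes below.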
// Load top and check if it is the end of elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
__ li(t3, Operand(new_space_allocation_top));
__ lw(a3, MemOperand(t3));
__ Branch(&call_builtin, ne, end_elements, Operand(a3));
__ li(t5, Operand(new_space_allocation_limit));
__ lw(t5, MemOperand(t5));
__ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
__ Branch(&call_builtin, hi, a3, Operand(t5));
// We fit and could grow elements.
// Update new_space_allocation_top.
__ sw(a3, MemOperand(t3));
// Push the argument.
__ sw(a2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ sw(a3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
__ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
}
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not an array or is observed or sealed, bail out to regular
// call.
if (!object->IsJSArray() ||
!cell.is_null() ||
Handle<JSArray>::cast(object)->map()->is_observed() ||
!Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss, return_undefined, call_builtin;
Register receiver = a0;
Register scratch = a1;
Register elements = a3;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into t0 and calculate new length.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Subu(t0, t0, Operand(Smi::FromInt(1)));
__ Branch(&return_undefined, lt, t0, Operand(zero_reg));
// Get the last element.
__ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(elements, elements, t1);
__ lw(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Branch(&call_builtin, eq, scratch, Operand(t2));
// Set the array's length.
__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
__ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
const int argc = arguments().immediate();
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&return_undefined);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ DropAndRet(argc + 1);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = a0;
Register index = t1;
Register result = a1;
const int argc = arguments().immediate();
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
__ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
StringCharCodeAtGenerator generator(receiver,
index,
result,
&miss, // When not a string.
&miss, // When not a number.
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
__ mov(v0, result);
__ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(v0, Heap::kNanValueRootIndex);
__ DropAndRet(argc + 1);
}
__ bind(&miss);
// Restore function name in a2.
__ li(a2, name);
HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
(CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = a0;
Register index = t1;
Register scratch = a3;
Register result = a1;
if (argc > 0) {
__ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
StringCharAtGenerator generator(receiver,
index,
scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
__ mov(v0, result);
__ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ DropAndRet(argc + 1);
}
__ bind(&miss);
// Restore function name in a2.
__ li(a2, name);
HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the char code argument.
Register code = a1;
__ lw(code, MemOperand(sp, 0 * kPointerSize));
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ And(code, code, Operand(Smi::FromInt(0xffff)));
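// Smi::FromInt(0xffff) shifts the mask into the smi payload bits, so the
// And truncates to uint16 without disturbing the smi tag.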
StringCharFromCodeGenerator generator(code, v0);
generator.GenerateFast(masm());
__ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into v0.
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
__ SmiTst(v0, t0);
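// SmiTst leaves zero in t0 iff v0 is a smi; a smi is already an integer,
// so the conditional DropAndRet returns it unchanged.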
__ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
// If fpu is enabled, we use the floor instruction.
// Load the HeapNumber value.
__ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
// Backup FCSR.
__ cfc1(a3, FCSR);
// Clear FCSR so the exception flags checked below start out at zero;
// writing zero also masks all FPU exceptions.
__ ctc1(zero_reg, FCSR);
// Convert the argument to an integer.
__ floor_w_d(f0, f0);
// Start checking for special cases.
// Get the argument exponent and clear the sign bit.
__ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
__ And(t2, t1, Operand(~HeapNumber::kSignMask));
__ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
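// t2 now holds the argument's biased exponent.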
// Retrieve FCSR and check for fpu errors.
__ cfc1(t5, FCSR);
__ And(t5, t5, Operand(kFCSRExceptionFlagMask));
__ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
// Check for NaN, Infinity, and -Infinity.
// They are invariant under Math.floor, so just
// return the original argument.
__ Subu(t3, t2, Operand(HeapNumber::kExponentMask
>> HeapNumber::kMantissaBitsInTopWord));
__ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
// We had an overflow or underflow in the conversion. Check if we
// have a big exponent.
// If greater or equal, the argument is already rounded and in v0.
__ Branch(&restore_fcsr_and_return, ge, t3,
Operand(HeapNumber::kMantissaBits));
__ Branch(&wont_fit_smi);
__ bind(&no_fpu_error);
// Move the result back to v0.
__ mfc1(v0, f0);
// Check if the result fits into a smi.
__ Addu(a1, v0, Operand(0x40000000));
__ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
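// The addition makes a1 negative exactly when v0 lies outside the
// 31-bit smi range.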
// Tag the result.
STATIC_ASSERT(kSmiTag == 0);
__ sll(v0, v0, kSmiTagSize);
// Check for -0.
__ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
// t1 already holds the HeapNumber exponent.
__ And(t0, t1, Operand(HeapNumber::kSignMask));
// If our HeapNumber is negative it was -0, so reload it and return it.
// Else v0 is loaded with 0, so we can also just return.
__ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
__ bind(&restore_fcsr_and_return);
// Restore FCSR and return.
__ ctc1(a3, FCSR);
__ DropAndRet(argc + 1);
__ bind(&wont_fit_smi);
// Restore FCSR and fall to slow case.
__ ctc1(a3, FCSR);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
if (!cell.is_null()) {
ASSERT(cell->value() == *function);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into v0.
__ lw(v0, MemOperand(sp, 0 * kPointerSize));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(v0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
__ sra(t0, v0, kBitsPerInt - 1);
__ Xor(a1, v0, t0);
// Add 1 or do nothing depending on the sign of the argument.
__ Subu(v0, a1, t0);
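// For a negative smi this computes (~x) + 1 == -x; for a non-negative smi
// both the xor and the subtraction leave the value unchanged.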
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ Branch(&slow, lt, v0, Operand(zero_reg));
// Smi case done.
__ DropAndRet(argc + 1);
// Check if the argument is a heap number and load its exponent and
// sign.
__ bind(&not_smi);
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
__ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
__ And(t0, a1, Operand(HeapNumber::kSignMask));
__ Branch(&negative_sign, ne, t0, Operand(zero_reg));
__ DropAndRet(argc + 1);
// If the argument is negative, clear the sign, and return a new
// number.
__ bind(&negative_sign);
__ Xor(a1, a1, Operand(HeapNumber::kSignMask));
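// The sign bit is known to be set here, so the xor clears it.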
__ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t0, t1, t2, &slow);
__ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ DropAndRet(argc + 1);
__ bind(&slow);
// We do not have to patch the receiver because the function makes no use of
// it.
GenerateJumpFunctionIgnoreReceiver(function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
}
Handle<Code> CallStubCompiler::CompileFastApiCall(
const CallOptimization& optimization,
Handle<Object> object,
Handle<JSObject> holder,
Handle<Cell> cell,
Handle<JSFunction> function,
Handle<String> name) {
Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object, as we don't want to
// repatch it to the global receiver.
if (object->IsGlobalObject()) return Handle<Code>::null();
if (!cell.is_null()) return Handle<Code>::null();
if (!object->IsJSObject()) return Handle<Code>::null();
int depth = optimization.GetPrototypeDepthOfExpectedType(
Handle<JSObject>::cast(object), holder);
if (depth == kInvalidProtoDepth) return Handle<Code>::null();
Label miss, miss_before_stack_reserved;
GenerateNameCheck(name, &miss_before_stack_reserved);
// Get the receiver from the stack.
const int argc = arguments().immediate();
__ lw(a1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
__ JumpIfSmi(a1, &miss_before_stack_reserved);
__ IncrementCounter(counters->call_const(), 1, a0, a3);
__ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
ReserveSpaceForFastApiCall(masm(), a0);
// Check that the maps haven't changed and find the holder as a side effect.
CheckPrototypes(
IC::CurrentTypeOf(object, isolate()),
a1, holder, a0, a3, t0, name, depth, &miss);
GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
// Check that the object is a boolean.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(&success, eq, object, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(miss, ne, object, Operand(at));
__ bind(&success);
}
void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
if (object->IsGlobalObject()) {
const int argc = arguments().immediate();
const int receiver_offset = argc * kPointerSize;
__ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
__ sw(a3, MemOperand(sp, receiver_offset));
}
}
Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
Handle<JSObject> holder,
Handle<Name> name,
CheckType check,
Label* miss) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
GenerateNameCheck(name, miss);
Register reg = a0;
// Get the receiver from the stack.
const int argc = arguments().immediate();
const int receiver_offset = argc * kPointerSize;
__ lw(a0, MemOperand(sp, receiver_offset));
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
__ JumpIfSmi(a0, miss);
}
// Make sure that it's okay not to patch the on-stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
switch (check) {
case RECEIVER_MAP_CHECK:
__ IncrementCounter(isolate()->counters()->call_const(), 1, a1, a3);
// Check that the maps haven't changed.
reg = CheckPrototypes(
IC::CurrentTypeOf(object, isolate()),
reg, holder, a1, a3, t0, name, miss);
break;
case STRING_CHECK: {
// Check that the object is a string.
__ GetObjectType(reg, a3, a3);
__ Branch(miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, a1, miss);
break;
}
case SYMBOL_CHECK: {
// Check that the object is a symbol.
__ GetObjectType(reg, a1, a3);
__ Branch(miss, ne, a3, Operand(SYMBOL_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::SYMBOL_FUNCTION_INDEX, a1, miss);
break;
}
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(reg, &fast);
__ GetObjectType(reg, a3, a3);
__ Branch(miss, ne, a3, Operand(HEAP_NUMBER_TYPE));
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, a1, miss);
break;
}
case BOOLEAN_CHECK: {
GenerateBooleanCheck(reg, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, a1, miss);
break;
}
}
if (check != RECEIVER_MAP_CHECK) {
Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
reg = CheckPrototypes(
IC::CurrentTypeOf(prototype, isolate()),
a1, holder, a1, a3, t0, name, miss);
}
return reg;
}
void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
Register function,
Label* miss) {
ASSERT(function.is(a1));
// Check that the function really is a function.
GenerateFunctionCheck(function, a3, miss);
PatchGlobalProxy(object);
// Invoke the function.
__ InvokeFunction(a1, arguments(), JUMP_FUNCTION,
NullCallWrapper(), call_kind());
}
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
Label miss;
GenerateNameCheck(name, &miss);
// Get the number of arguments.
const int argc = arguments().immediate();
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), a2, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
&miss);
// Move the returned value, the function to call, into a1.
__ mov(a1, v0);
// Restore receiver.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
GenerateJumpFunction(object, a1, &miss);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::FAST, name);
}
Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<JSObject> object,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
Code::NORMAL);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}
Label miss;
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Potentially loads a closure that matches the shared function info of the
// function, rather than the function itself.
GenerateLoadFunctionFromCell(cell, function, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
GenerateJumpFunction(object, a1, function);
HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
receiver(), holder, name);
// Stub is never generated for non-global objects that require access
// checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver()); // Receiver.
__ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
#undef __
#define __ ACCESS_MASM(masm)
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(a0);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
__ push(a1);
__ push(a0);
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
// We have to return the passed value, not the return value of the setter.
__ pop(v0);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
#undef __
#define __ ACCESS_MASM(masm())
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
// Return undefined if the maps of the full prototype chain are still the
// same.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ Ret();
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Register* LoadStubCompiler::registers() {
// receiver, name, scratch1, scratch2, scratch3, scratch4.
static Register registers[] = { a0, a2, a3, a1, t0, t1 };
return registers;
}
Register* KeyedLoadStubCompiler::registers() {
// receiver, name, scratch1, scratch2, scratch3, scratch4.
static Register registers[] = { a1, a0, a2, a3, t0, t1 };
return registers;
}
Register* StoreStubCompiler::registers() {
// receiver, name, value, scratch1, scratch2, scratch3.
static Register registers[] = { a1, a2, a0, a3, t0, t1 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
// receiver, name, value, scratch1, scratch2, scratch3.
static Register registers[] = { a2, a1, a0, a3, t0, t1 };
return registers;
}
void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss) {
__ Branch(miss, ne, name_reg, Operand(name));
}
void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss) {
__ Branch(miss, ne, name_reg, Operand(name));
}
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- a0 : receiver
// -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
#undef __
#define __ ACCESS_MASM(masm())
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
Label miss;
HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
__ lw(t0, FieldMemOperand(a3, Cell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&miss, eq, t0, Operand(at));
}
HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
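// The mov below executes in the Ret's branch delay slot, so v0 is set
// before the return completes.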
__ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
IcCheckType check) {
Label miss;
if (check == PROPERTY) {
GenerateNameCheck(name, this->name(), &miss);
}
Label number_case;
Register match = scratch1();
Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
Register map_reg = scratch2();
int receiver_count = types->length();
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
Handle<Type> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match.
// Separate compare from branch so the JumpIfSmi() above can enter at
// number_case below with match already zero.
__ Subu(match, map_reg, Operand(map));
if (type->Is(Type::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
eq, match, Operand(zero_reg));
}
}
ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps) {
Label miss;
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
__ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
scratch1(), Operand(receiver_maps->at(i)));
} else {
Label next_map;
__ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
__ li(transition_map(), Operand(transitioned_maps->at(i)));
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
}
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
return GetICCode(
kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
#undef __
#define __ ACCESS_MASM(masm)
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Label slow, miss;
Register key = a0;
Register receiver = a1;
__ JumpIfNotSmi(key, &miss);
__ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ sra(a2, a0, kSmiTagSize);
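// Untag the smi key into a2 for the dictionary probe.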
__ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
__ Ret();
// Slow case, key and receiver still in a0 and a1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, a2, a3);
// Entry registers are intact.
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
__ bind(&miss);
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS