// external/chromium_org/v8/src/objects-inl.h
// (Android Lollipop, 5.0.1_r1)
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Review notes:
//
// - The use of macros in these inline functions may seem superfluous
// but it is absolutely needed to make sure gcc generates optimal
// code. gcc is not happy when attempting to inline too deep.
//

#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_

#include "src/base/atomicops.h"
#include "src/elements.h"
#include "src/objects.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/field-index-inl.h"
#include "src/heap.h"
#include "src/isolate.h"
#include "src/heap-inl.h"
#include "src/property.h"
#include "src/spaces.h"
#include "src/store-buffer.h"
#include "src/v8memory.h"
#include "src/factory.h"
#include "src/incremental-marking.h"
#include "src/transitions-inl.h"
#include "src/objects-visiting.h"
#include "src/lookup.h"

namespace v8 {
namespace internal {

PropertyDetails::PropertyDetails(Smi* smi) {
  value_ = smi->value();
}


Smi* PropertyDetails::AsSmi() const {
  // Ensure the upper 2 bits have the same value by sign extending it. This is
  // necessary to be able to use the 31st bit of the property details.
  int value = value_ << 1;
  return Smi::FromInt(value >> 1);
}


PropertyDetails PropertyDetails::AsDeleted() const {
  Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
  return PropertyDetails(smi);
}


#define TYPE_CHECKER(type, instancetype)                                \
  bool Object::Is##type() {                                             \
    return Object::IsHeapObject() &&                                    \
        HeapObject::cast(this)->map()->instance_type() == instancetype; \
  }


#define CAST_ACCESSOR(type)                 \
  type* type::cast(Object* object) {        \
    SLOW_ASSERT(object->Is##type());        \
    return reinterpret_cast<type*>(object); \
  }


#define INT_ACCESSORS(holder, name, offset)                   \
  int holder::name() { return READ_INT_FIELD(this, offset); } \
  void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }


#define ACCESSORS(holder, name, type, offset)                           \
  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
    WRITE_FIELD(this, offset, value);                                   \
    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
  }


// Getter that returns a tagged Smi and setter that writes a tagged Smi.
#define ACCESSORS_TO_SMI(holder, name, offset)                        \
  Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
  void holder::set_##name(Smi* value, WriteBarrierMode mode) {        \
    WRITE_FIELD(this, offset, value);                                 \
  }


// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset)         \
  int holder::name() {                              \
    Object* value = READ_FIELD(this, offset);       \
    return Smi::cast(value)->value();               \
  }                                                 \
  void holder::set_##name(int value) {              \
    WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }

#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)    \
  int holder::synchronized_##name() {                       \
    Object* value = ACQUIRE_READ_FIELD(this, offset);       \
    return Smi::cast(value)->value();                       \
  }                                                         \
  void holder::synchronized_set_##name(int value) {         \
    RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }

#define NOBARRIER_SMI_ACCESSORS(holder, name, offset)         \
  int holder::nobarrier_##name() {                            \
    Object* value = NOBARRIER_READ_FIELD(this, offset);       \
    return Smi::cast(value)->value();                         \
  }                                                           \
  void holder::nobarrier_set_##name(int value) {              \
    NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }

#define BOOL_GETTER(holder, field, name, offset) \
  bool holder::name() {                          \
    return BooleanBit::get(field(), offset);     \
  }                                              \


#define BOOL_ACCESSORS(holder, field, name, offset)       \
  bool holder::name() {                                   \
    return BooleanBit::get(field(), offset);              \
  }                                                       \
  void holder::set_##name(bool value) {                   \
    set_##field(BooleanBit::set(field(), offset, value)); \
  }
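
// Illustrative sketch (the holder and field names below are hypothetical,
// chosen only to show the expansion): SMI_ACCESSORS(SomeHolder, length,
// kLengthOffset) expands to roughly
//
//   int SomeHolder::length() {
//     Object* value = READ_FIELD(this, kLengthOffset);
//     return Smi::cast(value)->value();
//   }
//   void SomeHolder::set_length(int value) {
//     WRITE_FIELD(this, kLengthOffset, Smi::FromInt(value));
//   }
//
// i.e. the raw field always holds a tagged Smi, while callers see a plain int.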
bool Object::IsFixedArrayBase() {
  return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
         IsFixedTypedArrayBase() || IsExternalArray();
}


// External objects are not extensible, so the map check is enough.
bool Object::IsExternal() {
  return Object::IsHeapObject() &&
      HeapObject::cast(this)->map() ==
      HeapObject::cast(this)->GetHeap()->external_map();
}


bool Object::IsAccessorInfo() {
  return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
}


bool Object::IsSmi() {
  return HAS_SMI_TAG(this);
}


bool Object::IsHeapObject() {
  return Internals::HasHeapObjectTag(this);
}


TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)


bool Object::IsString() {
  return Object::IsHeapObject() &&
      HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
}


bool Object::IsName() {
  return IsString() || IsSymbol();
}


bool Object::IsUniqueName() {
  return IsInternalizedString() || IsSymbol();
}


bool Object::IsSpecObject() {
  return Object::IsHeapObject()
    && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
}


bool Object::IsSpecFunction() {
  if (!Object::IsHeapObject()) return false;
  InstanceType type = HeapObject::cast(this)->map()->instance_type();
  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
}


bool Object::IsTemplateInfo() {
  return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}


bool Object::IsInternalizedString() {
  if (!this->IsHeapObject()) return false;
  uint32_t type = HeapObject::cast(this)->map()->instance_type();
  STATIC_ASSERT(kNotInternalizedTag != 0);
  return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
      (kStringTag | kInternalizedTag);
}


bool Object::IsConsString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsCons();
}


bool Object::IsSlicedString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSliced();
}


bool Object::IsSeqString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential();
}


bool Object::IsSeqOneByteString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential() &&
         String::cast(this)->IsOneByteRepresentation();
}


bool Object::IsSeqTwoByteString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsSequential() &&
         String::cast(this)->IsTwoByteRepresentation();
}


bool Object::IsExternalString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal();
}


bool Object::IsExternalAsciiString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal() &&
         String::cast(this)->IsOneByteRepresentation();
}


bool Object::IsExternalTwoByteString() {
  if (!IsString()) return false;
  return StringShape(String::cast(this)).IsExternal() &&
         String::cast(this)->IsTwoByteRepresentation();
}


bool Object::HasValidElements() {
  // Dictionary is covered under FixedArray.
  return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() ||
         IsFixedTypedArrayBase();
}


Handle<Object> Object::NewStorageFor(Isolate* isolate,
                                     Handle<Object> object,
                                     Representation representation) {
  if (representation.IsSmi() && object->IsUninitialized()) {
    return handle(Smi::FromInt(0), isolate);
  }
  if (!representation.IsDouble()) return object;
  if (object->IsUninitialized()) {
    return isolate->factory()->NewHeapNumber(0);
  }
  return isolate->factory()->NewHeapNumber(object->Number());
}


StringShape::StringShape(String* str)
  : type_(str->map()->instance_type()) {
  set_valid();
  ASSERT((type_ & kIsNotStringMask) == kStringTag);
}


StringShape::StringShape(Map* map)
  : type_(map->instance_type()) {
  set_valid();
  ASSERT((type_ & kIsNotStringMask) == kStringTag);
}


StringShape::StringShape(InstanceType t)
  : type_(static_cast<uint32_t>(t)) {
  set_valid();
  ASSERT((type_ & kIsNotStringMask) == kStringTag);
}


bool StringShape::IsInternalized() {
  ASSERT(valid());
  STATIC_ASSERT(kNotInternalizedTag != 0);
  return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
      (kStringTag | kInternalizedTag);
}


bool String::IsOneByteRepresentation() {
  uint32_t type = map()->instance_type();
  return (type & kStringEncodingMask) == kOneByteStringTag;
}


bool String::IsTwoByteRepresentation() {
  uint32_t type = map()->instance_type();
  return (type & kStringEncodingMask) == kTwoByteStringTag;
}


bool String::IsOneByteRepresentationUnderneath() {
  uint32_t type = map()->instance_type();
  STATIC_ASSERT(kIsIndirectStringTag != 0);
  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
  ASSERT(IsFlat());
  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
    case kOneByteStringTag:
      return true;
    case kTwoByteStringTag:
      return false;
    default:  // Cons or sliced string.  Need to go deeper.
      return GetUnderlying()->IsOneByteRepresentation();
  }
}


bool String::IsTwoByteRepresentationUnderneath() {
  uint32_t type = map()->instance_type();
  STATIC_ASSERT(kIsIndirectStringTag != 0);
  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
  ASSERT(IsFlat());
  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
    case kOneByteStringTag:
      return false;
    case kTwoByteStringTag:
      return true;
    default:  // Cons or sliced string.  Need to go deeper.
      return GetUnderlying()->IsTwoByteRepresentation();
  }
}


bool String::HasOnlyOneByteChars() {
  uint32_t type = map()->instance_type();
  return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
         IsOneByteRepresentation();
}


bool StringShape::IsCons() {
  return (type_ & kStringRepresentationMask) == kConsStringTag;
}


bool StringShape::IsSliced() {
  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}


bool StringShape::IsIndirect() {
  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}


bool StringShape::IsExternal() {
  return (type_ & kStringRepresentationMask) == kExternalStringTag;
}


bool StringShape::IsSequential() {
  return (type_ & kStringRepresentationMask) == kSeqStringTag;
}


StringRepresentationTag StringShape::representation_tag() {
  uint32_t tag = (type_ & kStringRepresentationMask);
  return static_cast<StringRepresentationTag>(tag);
}


uint32_t StringShape::encoding_tag() {
  return type_ & kStringEncodingMask;
}


uint32_t StringShape::full_representation_tag() {
  return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}


STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
             Internals::kFullStringRepresentationMask);

STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
             Internals::kStringEncodingMask);


bool StringShape::IsSequentialAscii() {
  return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}


bool StringShape::IsSequentialTwoByte() {
  return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}


bool StringShape::IsExternalAscii() {
  return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}


STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
             Internals::kExternalAsciiRepresentationTag);

STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag);


bool StringShape::IsExternalTwoByte() {
  return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}


STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
             Internals::kExternalTwoByteRepresentationTag);

STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);


uc32 FlatStringReader::Get(int index) {
  ASSERT(0 <= index && index <= length_);
  if (is_ascii_) {
    return static_cast<const byte*>(start_)[index];
  } else {
    return static_cast<const uc16*>(start_)[index];
  }
}
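
// For illustration: the representation and encoding bits combine
// independently, so a sequential one-byte string satisfies
//
//   (type & (kStringRepresentationMask | kStringEncodingMask))
//       == (kSeqStringTag | kOneByteStringTag)
//
// which is exactly what IsSequentialAscii() above tests via
// full_representation_tag().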
Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
  return key->AsHandle(isolate);
}


Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) {
  return key->AsHandle(isolate);
}


Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
                                               HashTableKey* key) {
  return key->AsHandle(isolate);
}


Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
                                                 HashTableKey* key) {
  return key->AsHandle(isolate);
}


template <typename Char>
class SequentialStringKey : public HashTableKey {
 public:
  explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
      : string_(string), hash_field_(0), seed_(seed) { }

  virtual uint32_t Hash() V8_OVERRIDE {
    hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
                                                           string_.length(),
                                                           seed_);

    uint32_t result = hash_field_ >> String::kHashShift;
    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }


  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
    return String::cast(other)->Hash();
  }

  Vector<const Char> string_;
  uint32_t hash_field_;
  uint32_t seed_;
};


class OneByteStringKey : public SequentialStringKey<uint8_t> {
 public:
  OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
      : SequentialStringKey<uint8_t>(str, seed) { }

  virtual bool IsMatch(Object* string) V8_OVERRIDE {
    return String::cast(string)->IsOneByteEqualTo(string_);
  }

  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
};


template<class Char>
class SubStringKey : public HashTableKey {
 public:
  SubStringKey(Handle<String> string, int from, int length)
      : string_(string), from_(from), length_(length) {
    if (string_->IsSlicedString()) {
      string_ = Handle<String>(Unslice(*string_, &from_));
    }
    ASSERT(string_->IsSeqString() || string->IsExternalString());
  }

  virtual uint32_t Hash() V8_OVERRIDE {
    ASSERT(length_ >= 0);
    ASSERT(from_ + length_ <= string_->length());
    const Char* chars = GetChars() + from_;
    hash_field_ = StringHasher::HashSequentialString(
        chars, length_, string_->GetHeap()->HashSeed());
    uint32_t result = hash_field_ >> String::kHashShift;
    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }

  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
    return String::cast(other)->Hash();
  }

  virtual bool IsMatch(Object* string) V8_OVERRIDE;
  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;

 private:
  const Char* GetChars();

  String* Unslice(String* string, int* offset) {
    while (string->IsSlicedString()) {
      SlicedString* sliced = SlicedString::cast(string);
      *offset += sliced->offset();
      string = sliced->parent();
    }
    return string;
  }

  Handle<String> string_;
  int from_;
  int length_;
  uint32_t hash_field_;
};


class TwoByteStringKey : public SequentialStringKey<uc16> {
 public:
  explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
      : SequentialStringKey<uc16>(str, seed) { }

  virtual bool IsMatch(Object* string) V8_OVERRIDE {
    return String::cast(string)->IsTwoByteEqualTo(string_);
  }

  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
};


// Utf8StringKey carries a vector of chars as key.
class Utf8StringKey : public HashTableKey {
 public:
  explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
      : string_(string), hash_field_(0), seed_(seed) { }

  virtual bool IsMatch(Object* string) V8_OVERRIDE {
    return String::cast(string)->IsUtf8EqualTo(string_);
  }

  virtual uint32_t Hash() V8_OVERRIDE {
    if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
    hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
    uint32_t result = hash_field_ >> String::kHashShift;
    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
    return result;
  }

  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
    return String::cast(other)->Hash();
  }

  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
    if (hash_field_ == 0) Hash();
    return isolate->factory()->NewInternalizedStringFromUtf8(
        string_, chars_, hash_field_);
  }

  Vector<const char> string_;
  uint32_t hash_field_;
  int chars_;  // Caches the number of characters when computing the hash code.
  uint32_t seed_;
};
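
// Hedged usage sketch: a HashTableKey bundles the hashing and equality logic
// a hash table needs for one lookup. Internalizing a one-byte string might
// conceptually run as follows (the probing protocol described here is an
// assumption for illustration, not the table's literal API):
//
//   OneByteStringKey key(one_byte_vector, isolate->heap()->HashSeed());
//   uint32_t hash = key.Hash();     // hashes the raw characters once
//   // ... the table probes buckets, calling key.IsMatch(candidate) on each;
//   // on a miss it calls key.AsHandle(isolate) to allocate the
//   // internalized string that gets inserted.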
bool Object::IsNumber() {
  return IsSmi() || IsHeapNumber();
}


TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)


bool Object::IsFiller() {
  if (!Object::IsHeapObject()) return false;
  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}


bool Object::IsExternalArray() {
  if (!Object::IsHeapObject())
    return false;
  InstanceType instance_type =
      HeapObject::cast(this)->map()->instance_type();
  return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
          instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
}


#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size)     \
  TYPE_CHECKER(External##Type##Array, EXTERNAL_##TYPE##_ARRAY_TYPE) \
  TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)

TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER


bool Object::IsFixedTypedArrayBase() {
  if (!Object::IsHeapObject()) return false;

  InstanceType instance_type =
      HeapObject::cast(this)->map()->instance_type();
  return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
          instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}


bool Object::IsJSReceiver() {
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  return IsHeapObject() &&
      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}


bool Object::IsJSObject() {
  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
  return IsHeapObject() &&
      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
}


bool Object::IsJSProxy() {
  if (!Object::IsHeapObject()) return false;
  return HeapObject::cast(this)->map()->IsJSProxyMap();
}


TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)


bool Object::IsJSWeakCollection() {
  return IsJSWeakMap() || IsJSWeakSet();
}


bool Object::IsDescriptorArray() {
  return IsFixedArray();
}


bool Object::IsTransitionArray() {
  return IsFixedArray();
}


bool Object::IsDeoptimizationInputData() {
  // Must be a fixed array.
  if (!IsFixedArray()) return false;

  // There's no sure way to detect the difference between a fixed array and
  // a deoptimization data array.  Since this is used for asserts we can
  // check that the length is zero or else the fixed size plus a multiple of
  // the entry size.
  int length = FixedArray::cast(this)->length();
  if (length == 0) return true;

  length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
  return length >= 0 &&
      length % DeoptimizationInputData::kDeoptEntrySize == 0;
}


bool Object::IsDeoptimizationOutputData() {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a deoptimization data array.  Since this is used for asserts we can check
  // that the length is plausible though.
  if (FixedArray::cast(this)->length() % 2 != 0) return false;
  return true;
}


bool Object::IsDependentCode() {
  if (!IsFixedArray()) return false;
  // There's actually no way to see the difference between a fixed array and
  // a dependent codes array.
  return true;
}


bool Object::IsContext() {
  if (!Object::IsHeapObject()) return false;
  Map* map = HeapObject::cast(this)->map();
  Heap* heap = map->GetHeap();
  return (map == heap->function_context_map() ||
      map == heap->catch_context_map() ||
      map == heap->with_context_map() ||
      map == heap->native_context_map() ||
      map == heap->block_context_map() ||
      map == heap->module_context_map() ||
      map == heap->global_context_map());
}


bool Object::IsNativeContext() {
  return Object::IsHeapObject() &&
      HeapObject::cast(this)->map() ==
      HeapObject::cast(this)->GetHeap()->native_context_map();
}


bool Object::IsScopeInfo() {
  return Object::IsHeapObject() &&
      HeapObject::cast(this)->map() ==
      HeapObject::cast(this)->GetHeap()->scope_info_map();
}


TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)


template <> inline bool Is<JSFunction>(Object* obj) {
  return obj->IsJSFunction();
}


TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)


bool Object::IsStringWrapper() {
  return IsJSValue() && JSValue::cast(this)->value()->IsString();
}


TYPE_CHECKER(Foreign, FOREIGN_TYPE)


bool Object::IsBoolean() {
  return IsOddball() &&
      ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}


TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)


bool Object::IsJSArrayBufferView() {
  return IsJSDataView() || IsJSTypedArray();
}


TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)


template <> inline bool Is<JSArray>(Object* obj) {
  return obj->IsJSArray();
}


bool Object::IsHashTable() {
  return Object::IsHeapObject() &&
      HeapObject::cast(this)->map() ==
      HeapObject::cast(this)->GetHeap()->hash_table_map();
}


bool Object::IsDictionary() {
  return IsHashTable() &&
      this != HeapObject::cast(this)->GetHeap()->string_table();
}


bool Object::IsStringTable() {
  return IsHashTable();
}


bool Object::IsJSFunctionResultCache() {
  if (!IsFixedArray()) return false;
  FixedArray* self = FixedArray::cast(this);
  int length = self->length();
  if (length < JSFunctionResultCache::kEntriesIndex) return false;
  if ((length - JSFunctionResultCache::kEntriesIndex)
      % JSFunctionResultCache::kEntrySize != 0) {
    return false;
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    reinterpret_cast<JSFunctionResultCache*>(this)->
        JSFunctionResultCacheVerify();
  }
#endif
  return true;
}


bool Object::IsNormalizedMapCache() {
  return NormalizedMapCache::IsNormalizedMapCache(this);
}


int NormalizedMapCache::GetIndex(Handle<Map> map) {
  return map->Hash() % NormalizedMapCache::kEntries;
}


bool NormalizedMapCache::IsNormalizedMapCache(Object* obj) {
  if (!obj->IsFixedArray()) return false;
  if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
    return false;
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    reinterpret_cast<NormalizedMapCache*>(obj)->NormalizedMapCacheVerify();
  }
#endif
  return true;
}


bool Object::IsCompilationCacheTable() {
  return IsHashTable();
}


bool Object::IsCodeCacheHashTable() {
  return IsHashTable();
}


bool Object::IsPolymorphicCodeCacheHashTable() {
  return IsHashTable();
}


bool Object::IsMapCache() {
  return IsHashTable();
}


bool Object::IsObjectHashTable() {
  return IsHashTable();
}


bool Object::IsOrderedHashTable() {
  return IsHeapObject() &&
      HeapObject::cast(this)->map() ==
      HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
}


bool Object::IsPrimitive() {
  return IsOddball() || IsNumber() || IsString();
}


bool Object::IsJSGlobalProxy() {
  bool result = IsHeapObject() &&
                (HeapObject::cast(this)->map()->instance_type() ==
                 JS_GLOBAL_PROXY_TYPE);
  ASSERT(!result ||
         HeapObject::cast(this)->map()->is_access_check_needed());
  return result;
}


bool Object::IsGlobalObject() {
  if (!IsHeapObject()) return false;

  InstanceType type = HeapObject::cast(this)->map()->instance_type();
  return type == JS_GLOBAL_OBJECT_TYPE ||
         type == JS_BUILTINS_OBJECT_TYPE;
}


TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)


bool Object::IsUndetectableObject() {
  return IsHeapObject()
    && HeapObject::cast(this)->map()->is_undetectable();
}


bool Object::IsAccessCheckNeeded() {
  if (!IsHeapObject()) return false;
  if (IsJSGlobalProxy()) {
    JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
    GlobalObject* global =
        proxy->GetIsolate()->context()->global_object();
    return proxy->IsDetachedFrom(global);
  }
  return HeapObject::cast(this)->map()->is_access_check_needed();
}


bool Object::IsStruct() {
  if (!IsHeapObject()) return false;
  switch (HeapObject::cast(this)->map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
  STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
    default: return false;
  }
}


#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                         \
  bool Object::Is##Name() {                                             \
    return Object::IsHeapObject()                                       \
      && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
  }
  STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE


bool Object::IsUndefined() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
}


bool Object::IsNull() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
}


bool Object::IsTheHole() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
}


bool Object::IsException() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
}


bool Object::IsUninitialized() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
}


bool Object::IsTrue() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}


bool Object::IsFalse() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
}


bool Object::IsArgumentsMarker() {
  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
}


double Object::Number() {
  ASSERT(IsNumber());
  return IsSmi()
    ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
    : reinterpret_cast<HeapNumber*>(this)->value();
}


bool Object::IsNaN() {
  return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}


MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
  if (object->IsSmi()) return Handle<Smi>::cast(object);
  if (object->IsHeapNumber()) {
    double value = Handle<HeapNumber>::cast(object)->value();
    int int_value = FastD2I(value);
    if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
      return handle(Smi::FromInt(int_value), isolate);
    }
  }
  return Handle<Smi>();
}
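
// Worked example: ToSmi succeeds only for integral values in Smi range.
// For a HeapNumber holding 2.0, FastD2I gives 2 and FastI2D(2) == 2.0, so a
// Smi 2 is returned; for 2.5, FastI2D(FastD2I(2.5)) == 2.0 != 2.5 and the
// result is an empty MaybeHandle.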
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
                                         Handle<Object> object) {
  return ToObject(
      isolate, object, handle(isolate->context()->native_context(), isolate));
}


bool Object::HasSpecificClassOf(String* name) {
  return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}


MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
                                        Handle<Name> name) {
  LookupIterator it(object, name);
  return GetProperty(&it);
}


MaybeHandle<Object> Object::GetElement(Isolate* isolate,
                                       Handle<Object> object,
                                       uint32_t index) {
  // GetElement can trigger a getter which can cause allocation.
  // This was not always the case. This ASSERT is here to catch
  // leftover incorrect uses.
  ASSERT(AllowHeapAllocation::IsAllowed());
  return Object::GetElementWithReceiver(isolate, object, object, index);
}


MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
                                                 Handle<Name> name) {
  uint32_t index;
  Isolate* isolate = name->GetIsolate();
  if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
  return GetProperty(object, name);
}


MaybeHandle<Object> Object::GetProperty(Isolate* isolate,
                                        Handle<Object> object,
                                        const char* name) {
  Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
  ASSERT(!str.is_null());
#ifdef DEBUG
  uint32_t index;  // Assert that the name is not an array index.
  ASSERT(!str->AsArrayIndex(&index));
#endif  // DEBUG
  return GetProperty(object, str);
}


MaybeHandle<Object> JSProxy::GetElementWithHandler(Handle<JSProxy> proxy,
                                                   Handle<Object> receiver,
                                                   uint32_t index) {
  return GetPropertyWithHandler(
      proxy, receiver, proxy->GetIsolate()->factory()->Uint32ToString(index));
}


MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
                                                   Handle<JSReceiver> receiver,
                                                   uint32_t index,
                                                   Handle<Object> value,
                                                   StrictMode strict_mode) {
  Isolate* isolate = proxy->GetIsolate();
  Handle<String> name = isolate->factory()->Uint32ToString(index);
  return SetPropertyWithHandler(
      proxy, receiver, name, value, NONE, strict_mode);
}


bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
  Isolate* isolate = proxy->GetIsolate();
  Handle<String> name = isolate->factory()->Uint32ToString(index);
  return HasPropertyWithHandler(proxy, name);
}


#define FIELD_ADDR(p, offset) \
  (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)

#define READ_FIELD(p, offset) \
  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))

#define ACQUIRE_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Acquire_Load( \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))

#define NOBARRIER_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::NoBarrier_Load( \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))

#define WRITE_FIELD(p, offset, value) \
  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)

#define RELEASE_WRITE_FIELD(p, offset, value)                     \
  base::Release_Store(                                            \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value));

#define NOBARRIER_WRITE_FIELD(p, offset, value)                   \
  base::NoBarrier_Store(                                          \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value));

#define WRITE_BARRIER(heap, object, offset, value)          \
  heap->incremental_marking()->RecordWrite(                 \
      object, HeapObject::RawField(object, offset), value); \
  if (heap->InNewSpace(value)) {                            \
    heap->RecordWrite(object->address(), offset);           \
  }

#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
  if (mode == UPDATE_WRITE_BARRIER) {                                \
    heap->incremental_marking()->RecordWrite(                        \
      object, HeapObject::RawField(object, offset), value);          \
    if (heap->InNewSpace(value)) {                                   \
      heap->RecordWrite(object->address(), offset);                  \
    }                                                                \
  }

#ifndef V8_TARGET_ARCH_MIPS
  #define READ_DOUBLE_FIELD(p, offset) \
    (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
#else  // V8_TARGET_ARCH_MIPS
  // Prevent gcc from using load-double (mips ldc1) on (possibly)
  // non-64-bit aligned HeapNumber::value.
  static inline double read_double_field(void* p, int offset) {
    union conversion {
      double d;
      uint32_t u[2];
    } c;
    c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
    c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
    return c.d;
  }
  #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
#endif  // V8_TARGET_ARCH_MIPS

#ifndef V8_TARGET_ARCH_MIPS
  #define WRITE_DOUBLE_FIELD(p, offset, value) \
    (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
#else  // V8_TARGET_ARCH_MIPS
  // Prevent gcc from using store-double (mips sdc1) on (possibly)
  // non-64-bit aligned HeapNumber::value.
  static inline void write_double_field(void* p, int offset,
                                        double value) {
    union conversion {
      double d;
      uint32_t u[2];
    } c;
    c.d = value;
    (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
    (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
  }
  #define WRITE_DOUBLE_FIELD(p, offset, value) \
    write_double_field(p, offset, value)
#endif  // V8_TARGET_ARCH_MIPS


#define READ_INT_FIELD(p, offset) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))

#define WRITE_INT_FIELD(p, offset, value) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)

#define READ_INTPTR_FIELD(p, offset) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT32_FIELD(p, offset) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT32_FIELD(p, offset, value) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT32_FIELD(p, offset) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT32_FIELD(p, offset, value) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT64_FIELD(p, offset) \
  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT64_FIELD(p, offset, value) \
  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_SHORT_FIELD(p, offset) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_SHORT_FIELD(p, offset, value) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_BYTE_FIELD(p, offset) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))

#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
  static_cast<byte>(base::NoBarrier_Load(    \
      reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_BYTE_FIELD(p, offset, value) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value)           \
  base::NoBarrier_Store(                                       \
      reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic8>(value));
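
// For illustration: heap object pointers are tagged, so FIELD_ADDR subtracts
// the tag before adding the field offset. With kHeapObjectTag == 1, a field
// at offset 8 of an object whose tagged pointer is 0x1001 lives at
// 0x1001 + 8 - 1 == 0x1008, i.e. at a properly aligned raw address.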
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
  return &READ_FIELD(obj, byte_offset);
}


int Smi::value() {
  return Internals::SmiValue(this);
}


Smi* Smi::FromInt(int value) {
  ASSERT(Smi::IsValid(value));
  return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
}


Smi* Smi::FromIntptr(intptr_t value) {
  ASSERT(Smi::IsValid(value));
  int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
  return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}


bool Smi::IsValid(intptr_t value) {
  bool result = Internals::IsValidSmi(value);
  ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
  return result;
}
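
// Worked example (hedged, 32-bit layout): with kSmiTagSize == 1,
// kSmiShiftSize == 0 and kSmiTag == 0, FromIntptr(5) computes
// (5 << 1) | 0 == 10: the integer lives in the upper bits and the low tag
// bit distinguishes Smis (0) from heap object pointers (1).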
MapWord MapWord::FromMap(Map* map) {
  return MapWord(reinterpret_cast<uintptr_t>(map));
}


Map* MapWord::ToMap() {
  return reinterpret_cast<Map*>(value_);
}


bool MapWord::IsForwardingAddress() {
  return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}


MapWord MapWord::FromForwardingAddress(HeapObject* object) {
  Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
  return MapWord(reinterpret_cast<uintptr_t>(raw));
}


HeapObject* MapWord::ToForwardingAddress() {
  ASSERT(IsForwardingAddress());
  return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
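
// For illustration: a forwarding address is the target object's pointer with
// the heap-object tag stripped, which leaves the low bits zero (addresses are
// aligned) and therefore passes HAS_SMI_TAG. A real Map pointer stored in the
// same word keeps its tag bit set, which is how IsForwardingAddress() tells
// the two apart during evacuation.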
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
  VerifyPointer(READ_FIELD(this, offset));
}

void HeapObject::VerifySmiField(int offset) {
  CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif


Heap* HeapObject::GetHeap() {
  Heap* heap =
      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
  SLOW_ASSERT(heap != NULL);
  return heap;
}


Isolate* HeapObject::GetIsolate() {
  return GetHeap()->isolate();
}


Map* HeapObject::map() {
#ifdef DEBUG
  // Clear mark potentially added by PathTracer.
  uintptr_t raw_value =
      map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
  return MapWord::FromRawValue(raw_value).ToMap();
#else
  return map_word().ToMap();
#endif
}


void HeapObject::set_map(Map* value) {
  set_map_word(MapWord::FromMap(value));
  if (value != NULL) {
    // TODO(1600) We are passing NULL as a slot because maps can never be on
    // evacuation candidate.
    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
  }
}


Map* HeapObject::synchronized_map() {
  return synchronized_map_word().ToMap();
}


void HeapObject::synchronized_set_map(Map* value) {
  synchronized_set_map_word(MapWord::FromMap(value));
  if (value != NULL) {
    // TODO(1600) We are passing NULL as a slot because maps can never be on
    // evacuation candidate.
    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
  }
}


void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
  synchronized_set_map_word(MapWord::FromMap(value));
}


// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
  set_map_word(MapWord::FromMap(value));
}


MapWord HeapObject::map_word() {
  return MapWord(
      reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}


void HeapObject::set_map_word(MapWord map_word) {
  NOBARRIER_WRITE_FIELD(
      this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}


MapWord HeapObject::synchronized_map_word() {
  return MapWord(
      reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
}


void HeapObject::synchronized_set_map_word(MapWord map_word) {
  RELEASE_WRITE_FIELD(
      this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}


HeapObject* HeapObject::FromAddress(Address address) {
  ASSERT_TAG_ALIGNED(address);
  return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
}


Address HeapObject::address() {
  return reinterpret_cast<Address>(this) - kHeapObjectTag;
}


int HeapObject::Size() {
  return SizeFromMap(map());
}


void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
  v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
                   reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
}


void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
  v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
}


void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
  v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
}


double HeapNumber::value() {
  return READ_DOUBLE_FIELD(this, kValueOffset);
}


void HeapNumber::set_value(double value) {
  WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}


int HeapNumber::get_exponent() {
  return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
          kExponentShift) - kExponentBias;
}


int HeapNumber::get_sign() {
  return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
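
// Worked example (IEEE-754 doubles): for the value 1.0 the stored biased
// exponent field is 1023, so get_exponent() returns 1023 - kExponentBias == 0
// and get_sign() returns 0; for -2.0 the results are 1 and a non-zero sign
// bit respectively.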
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)


Object** FixedArray::GetFirstElementAddress() {
  return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
}


bool FixedArray::ContainsOnlySmisOrHoles() {
  Object* the_hole = GetHeap()->the_hole_value();
  Object** current = GetFirstElementAddress();
  for (int i = 0; i < length(); ++i) {
    Object* candidate = *current++;
    if (!candidate->IsSmi() && candidate != the_hole) return false;
  }
  return true;
}


FixedArrayBase* JSObject::elements() {
  Object* array = READ_FIELD(this, kElementsOffset);
  return static_cast<FixedArrayBase*>(array);
}


void JSObject::ValidateElements(Handle<JSObject> object) {
#ifdef ENABLE_SLOW_ASSERTS
  if (FLAG_enable_slow_asserts) {
    ElementsAccessor* accessor = object->GetElementsAccessor();
    accessor->Validate(object);
  }
#endif
}


void AllocationSite::Initialize() {
  set_transition_info(Smi::FromInt(0));
  SetElementsKind(GetInitialFastElementsKind());
  set_nested_site(Smi::FromInt(0));
  set_pretenure_data(Smi::FromInt(0));
  set_pretenure_create_count(Smi::FromInt(0));
  set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
                     SKIP_WRITE_BARRIER);
}


void AllocationSite::MarkZombie() {
  ASSERT(!IsZombie());
  Initialize();
  set_pretenure_decision(kZombie);
}


// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
    ElementsKind boilerplate_elements_kind) {
  if (FLAG_pretenuring_call_new ||
      IsFastSmiElementsKind(boilerplate_elements_kind)) {
    return TRACK_ALLOCATION_SITE;
  }

  return DONT_TRACK_ALLOCATION_SITE;
}


AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
                                           ElementsKind to) {
  if (FLAG_pretenuring_call_new ||
      (IsFastSmiElementsKind(from) &&
       IsMoreGeneralElementsKindTransition(from, to))) {
    return TRACK_ALLOCATION_SITE;
  }

  return DONT_TRACK_ALLOCATION_SITE;
}


inline bool AllocationSite::CanTrack(InstanceType type) {
  if (FLAG_allocation_site_pretenuring) {
    return type == JS_ARRAY_TYPE ||
        type == JS_OBJECT_TYPE ||
        type < FIRST_NONSTRING_TYPE;
  }
  return type == JS_ARRAY_TYPE;
}


inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
    Reason reason) {
  switch (reason) {
    case TENURING:
      return DependentCode::kAllocationSiteTenuringChangedGroup;
      break;
    case TRANSITIONS:
      return DependentCode::kAllocationSiteTransitionChangedGroup;
      break;
  }
  UNREACHABLE();
  return DependentCode::kAllocationSiteTransitionChangedGroup;
}


inline void AllocationSite::set_memento_found_count(int count) {
  int value = pretenure_data()->value();
  // Verify that we can count more mementos than we can possibly find in one
  // new space collection.
  ASSERT((GetHeap()->MaxSemiSpaceSize() /
          (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
           AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
  ASSERT(count < MementoFoundCountBits::kMax);
  set_pretenure_data(
      Smi::FromInt(MementoFoundCountBits::update(value, count)),
      SKIP_WRITE_BARRIER);
}

inline bool AllocationSite::IncrementMementoFoundCount() {
  if (IsZombie()) return false;

  int value = memento_found_count();
  set_memento_found_count(value + 1);
  return memento_found_count() == kPretenureMinimumCreated;
}


inline void AllocationSite::IncrementMementoCreateCount() {
  ASSERT(FLAG_allocation_site_pretenuring);
  int value = memento_create_count();
  set_memento_create_count(value + 1);
}


inline bool AllocationSite::MakePretenureDecision(
    PretenureDecision current_decision,
    double ratio,
    bool maximum_size_scavenge) {
  // Here we just allow state transitions from undecided or maybe tenure
  // to don't tenure, maybe tenure, or tenure.
  if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
    if (ratio >= kPretenureRatio) {
      // We just transition into tenure state when the semi-space was at
      // maximum capacity.
      if (maximum_size_scavenge) {
        set_deopt_dependent_code(true);
        set_pretenure_decision(kTenure);
        // Currently we just need to deopt when we make a state transition to
        // tenure.
        return true;
      }
      set_pretenure_decision(kMaybeTenure);
    } else {
      set_pretenure_decision(kDontTenure);
    }
  }
  return false;
}


inline bool AllocationSite::DigestPretenuringFeedback(
    bool maximum_size_scavenge) {
  bool deopt = false;
  int create_count = memento_create_count();
  int found_count = memento_found_count();
  bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
  double ratio =
      minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
          static_cast<double>(found_count) / create_count : 0.0;
  PretenureDecision current_decision = pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(
        current_decision, ratio, maximum_size_scavenge);
  }

  if (FLAG_trace_pretenuring_statistics) {
    PrintF(
        "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
         static_cast<void*>(this), create_count, found_count, ratio,
         PretenureDecisionName(current_decision),
         PretenureDecisionName(pretenure_decision()));
  }

  // Clear feedback calculation fields until the next gc.
  set_memento_found_count(0);
  set_memento_create_count(0);
  return deopt;
}
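
// Worked example (hedged): suppose enough mementos have been created to meet
// kPretenureMinimumCreated, with 90 of 100 found surviving; then
// ratio == 90.0 / 100 == 0.9. If that meets kPretenureRatio during a
// maximum-size scavenge the site transitions to kTenure and requests a
// deopt; otherwise it only moves to kMaybeTenure or kDontTenure as coded in
// MakePretenureDecision above.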
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
  JSObject::ValidateElements(object);
  ElementsKind elements_kind = object->map()->elements_kind();
  if (!IsFastObjectElementsKind(elements_kind)) {
    if (IsFastHoleyElementsKind(elements_kind)) {
      TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
    } else {
      TransitionElementsKind(object, FAST_ELEMENTS);
    }
  }
}


void JSObject::EnsureCanContainElements(Handle<JSObject> object,
                                        Object** objects,
                                        uint32_t count,
                                        EnsureElementsMode mode) {
  ElementsKind current_kind = object->map()->elements_kind();
  ElementsKind target_kind = current_kind;
  {
    DisallowHeapAllocation no_allocation;
    ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
    bool is_holey = IsFastHoleyElementsKind(current_kind);
    if (current_kind == FAST_HOLEY_ELEMENTS) return;
    Heap* heap = object->GetHeap();
    Object* the_hole = heap->the_hole_value();
    for (uint32_t i = 0; i < count; ++i) {
      Object* current = *objects++;
      if (current == the_hole) {
        is_holey = true;
        target_kind = GetHoleyElementsKind(target_kind);
      } else if (!current->IsSmi()) {
        if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
          if (IsFastSmiElementsKind(target_kind)) {
            if (is_holey) {
              target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
            } else {
              target_kind = FAST_DOUBLE_ELEMENTS;
            }
          }
        } else if (is_holey) {
          target_kind = FAST_HOLEY_ELEMENTS;
          break;
        } else {
          target_kind = FAST_ELEMENTS;
        }
      }
    }
  }
  if (target_kind != current_kind) {
    TransitionElementsKind(object, target_kind);
  }
}


void JSObject::EnsureCanContainElements(Handle<JSObject> object,
                                        Handle<FixedArrayBase> elements,
                                        uint32_t length,
                                        EnsureElementsMode mode) {
  Heap* heap = object->GetHeap();
  if (elements->map() != heap->fixed_double_array_map()) {
    ASSERT(elements->map() == heap->fixed_array_map() ||
           elements->map() == heap->fixed_cow_array_map());
    if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
      mode = DONT_ALLOW_DOUBLE_ELEMENTS;
    }
    Object** objects =
        Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
    EnsureCanContainElements(object, objects, length, mode);
    return;
  }

  ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
  if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
    TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
  } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
    Handle<FixedDoubleArray> double_array =
        Handle<FixedDoubleArray>::cast(elements);
    for (uint32_t i = 0; i < length; ++i) {
      if (double_array->is_the_hole(i)) {
        TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
        return;
      }
    }
    TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
  }
}


void JSObject::SetMapAndElements(Handle<JSObject> object,
                                 Handle<Map> new_map,
                                 Handle<FixedArrayBase> value) {
  JSObject::MigrateToMap(object, new_map);
  ASSERT((object->map()->has_fast_smi_or_object_elements() ||
          (*value == object->GetHeap()->empty_fixed_array())) ==
         (value->map() == object->GetHeap()->fixed_array_map() ||
          value->map() == object->GetHeap()->fixed_cow_array_map()));
  ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
         (object->map()->has_fast_double_elements() ==
          value->IsFixedDoubleArray()));
  object->set_elements(*value);
}


void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kElementsOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}


void JSObject::initialize_properties() {
  ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
  WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}


void JSObject::initialize_elements() {
  FixedArrayBase* elements = map()->GetInitialElements();
  WRITE_FIELD(this, kElementsOffset, elements);
}


Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
  DisallowHeapAllocation no_gc;
  if (!map->HasTransitionArray()) return Handle<String>::null();
  TransitionArray* transitions = map->transitions();
  if (!transitions->IsSimpleTransition()) return Handle<String>::null();
  int transition = TransitionArray::kSimpleTransitionIndex;
  PropertyDetails details = transitions->GetTargetDetails(transition);
  Name* name = transitions->GetKey(transition);
  if (details.type() != FIELD) return Handle<String>::null();
  if (details.attributes() != NONE) return Handle<String>::null();
  if (!name->IsString()) return Handle<String>::null();
  return Handle<String>(String::cast(name));
}


Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
  ASSERT(!ExpectedTransitionKey(map).is_null());
  return Handle<Map>(map->transitions()->GetTarget(
      TransitionArray::kSimpleTransitionIndex));
}


Handle<Map> JSObject::FindTransitionToField(Handle<Map> map,
                                            Handle<Name> key) {
  DisallowHeapAllocation no_allocation;
  if (!map->HasTransitionArray()) return Handle<Map>::null();
  TransitionArray* transitions = map->transitions();
  int transition = transitions->Search(*key);
  if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
  PropertyDetails target_details = transitions->GetTargetDetails(transition);
  if (target_details.type() != FIELD) return Handle<Map>::null();
  if (target_details.attributes() != NONE) return Handle<Map>::null();
  return Handle<Map>(transitions->GetTarget(transition));
}
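
// Hedged sketch: these helpers let the runtime predict map transitions when
// building object literals. If map M has a single simple transition that adds
// a plain FIELD property "x" with default attributes, then
// ExpectedTransitionKey(M) yields "x" and ExpectedTransitionTarget(M) the
// target map; FindTransitionToField(M, key) performs the same lookup for an
// arbitrary key via TransitionArray::Search, returning a null handle when no
// suitable transition exists.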
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)


byte Oddball::kind() {
  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}


void Oddball::set_kind(byte value) {
  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
}


Object* Cell::value() {
  return READ_FIELD(this, kValueOffset);
}


void Cell::set_value(Object* val, WriteBarrierMode ignored) {
  // The write barrier is not used for global property cells.
  ASSERT(!val->IsPropertyCell() && !val->IsCell());
  WRITE_FIELD(this, kValueOffset, val);
}

ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)

Object* PropertyCell::type_raw() {
  return READ_FIELD(this, kTypeOffset);
}


void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) {
  WRITE_FIELD(this, kTypeOffset, val);
}


int JSObject::GetHeaderSize() {
  InstanceType type = map()->instance_type();
  // Check for the most common kind of JavaScript object before
  // falling into the generic switch. This speeds up the internal
  // field operations considerably on average.
  if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
  switch (type) {
    case JS_GENERATOR_OBJECT_TYPE:
      return JSGeneratorObject::kSize;
    case JS_MODULE_TYPE:
      return JSModule::kSize;
    case JS_GLOBAL_PROXY_TYPE:
      return JSGlobalProxy::kSize;
    case JS_GLOBAL_OBJECT_TYPE:
      return JSGlobalObject::kSize;
    case JS_BUILTINS_OBJECT_TYPE:
      return JSBuiltinsObject::kSize;
    case JS_FUNCTION_TYPE:
      return JSFunction::kSize;
    case JS_VALUE_TYPE:
      return JSValue::kSize;
    case JS_DATE_TYPE:
      return JSDate::kSize;
    case JS_ARRAY_TYPE:
      return JSArray::kSize;
    case JS_ARRAY_BUFFER_TYPE:
      return JSArrayBuffer::kSize;
    case JS_TYPED_ARRAY_TYPE:
      return JSTypedArray::kSize;
    case JS_DATA_VIEW_TYPE:
      return JSDataView::kSize;
    case JS_SET_TYPE:
      return JSSet::kSize;
    case JS_MAP_TYPE:
      return JSMap::kSize;
    case JS_SET_ITERATOR_TYPE:
      return JSSetIterator::kSize;
    case JS_MAP_ITERATOR_TYPE:
      return JSMapIterator::kSize;
    case JS_WEAK_MAP_TYPE:
      return JSWeakMap::kSize;
    case JS_WEAK_SET_TYPE:
      return JSWeakSet::kSize;
    case JS_REGEXP_TYPE:
      return JSRegExp::kSize;
    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
      return JSObject::kHeaderSize;
    case JS_MESSAGE_OBJECT_TYPE:
      return JSMessageObject::kSize;
    default:
      // TODO(jkummerow): Re-enable this. Blink currently hits this
      // from its CustomElementConstructorBuilder.
      // UNREACHABLE();
      return 0;
  }
}


int JSObject::GetInternalFieldCount() {
  ASSERT(1 << kPointerSizeLog2 == kPointerSize);
  // Make sure to adjust for the number of in-object properties. These
  // properties do contribute to the size, but are not internal fields.
  return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
         map()->inobject_properties();
}


int JSObject::GetInternalFieldOffset(int index) {
  ASSERT(index < GetInternalFieldCount() && index >= 0);
  return GetHeaderSize() + (kPointerSize * index);
}


Object* JSObject::GetInternalField(int index) {
  ASSERT(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
}


void JSObject::SetInternalField(int index, Object* value) {
  ASSERT(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  int offset = GetHeaderSize() + (kPointerSize * index);
  WRITE_FIELD(this, offset, value);
  WRITE_BARRIER(GetHeap(), this, offset, value);
}


void JSObject::SetInternalField(int index, Smi* value) {
  ASSERT(index < GetInternalFieldCount() && index >= 0);
  // Internal objects do follow immediately after the header, whereas in-object
  // properties are at the end of the object. Therefore there is no need
  // to adjust the index here.
  int offset = GetHeaderSize() + (kPointerSize * index);
  WRITE_FIELD(this, offset, value);
}


// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object* JSObject::RawFastPropertyAt(FieldIndex index) {
  if (index.is_inobject()) {
    return READ_FIELD(this, index.offset());
  } else {
    return properties()->get(index.outobject_array_index());
  }
}


void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
  if (index.is_inobject()) {
    int offset = index.offset();
    WRITE_FIELD(this, offset, value);
    WRITE_BARRIER(GetHeap(), this, offset, value);
  } else {
    properties()->set(index.outobject_array_index(), value);
  }
}


int JSObject::GetInObjectPropertyOffset(int index) {
  return map()->GetInObjectPropertyOffset(index);
}


Object* JSObject::InObjectPropertyAt(int index) {
  int offset = GetInObjectPropertyOffset(index);
  return READ_FIELD(this, offset);
}


Object* JSObject::InObjectPropertyAtPut(int index,
                                        Object* value,
                                        WriteBarrierMode mode) {
  // Adjust for the number of properties stored in the object.
  int offset = GetInObjectPropertyOffset(index);
  WRITE_FIELD(this, offset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
  return value;
}


void JSObject::InitializeBody(Map* map,
                              Object* pre_allocated_value,
                              Object* filler_value) {
  ASSERT(!filler_value->IsHeapObject() ||
         !GetHeap()->InNewSpace(filler_value));
  ASSERT(!pre_allocated_value->IsHeapObject() ||
         !GetHeap()->InNewSpace(pre_allocated_value));
  int size = map->instance_size();
  int offset = kHeaderSize;
  if (filler_value != pre_allocated_value) {
    int pre_allocated = map->pre_allocated_property_fields();
    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
    for (int i = 0; i < pre_allocated; i++) {
      WRITE_FIELD(this, offset, pre_allocated_value);
      offset += kPointerSize;
    }
  }
  while (offset < size) {
    WRITE_FIELD(this, offset, filler_value);
    offset += kPointerSize;
  }
}


bool JSObject::HasFastProperties() {
  ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
  return !properties()->IsDictionary();
}


bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
  // Allow extra fast properties if the object has more than
  // kFastPropertiesSoftLimit in-object properties. When this is the case, it
  // is very unlikely that the object is being used as a dictionary and there
  // is a good chance that allowing more map transitions will be worth it.
  Map* map = this->map();
  if (map->unused_property_fields() != 0) return false;

  int inobject = map->inobject_properties();

  int limit;
  if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
    limit = Max(inobject, kMaxFastProperties);
  } else {
    limit = Max(inobject, kFastPropertiesSoftLimit);
  }
  return properties()->length() > limit;
}


void Struct::InitializeBody(int object_size) {
  Object* value = GetHeap()->undefined_value();
  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
    WRITE_FIELD(this, offset, value);
  }
}


bool Object::ToArrayIndex(uint32_t* index) {
  if (IsSmi()) {
    int value = Smi::cast(this)->value();
    if (value < 0) return false;
    *index = value;
    return true;
  }
  if (IsHeapNumber()) {
    double value = HeapNumber::cast(this)->value();
    uint32_t uint_value = static_cast<uint32_t>(value);
    if (value == static_cast<double>(uint_value)) {
      *index = uint_value;
      return true;
    }
  }
  return false;
}


bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
  if (!this->IsJSValue()) return false;

  JSValue* js_value = JSValue::cast(this);
  if (!js_value->value()->IsString()) return false;

  String* str = String::cast(js_value->value());
  if (index >= static_cast<uint32_t>(str->length())) return false;

  return true;
}


void Object::VerifyApiCallResultType() {
#if ENABLE_EXTRA_CHECKS
  if (!(IsSmi() ||
        IsString() ||
        IsSymbol() ||
        IsSpecObject() ||
        IsHeapNumber() ||
        IsUndefined() ||
        IsTrue() ||
        IsFalse() ||
        IsNull())) {
    FATAL("API call returned invalid object");
  }
#endif  // ENABLE_EXTRA_CHECKS
}


FixedArrayBase* FixedArrayBase::cast(Object* object) {
  ASSERT(object->IsFixedArrayBase());
  return reinterpret_cast<FixedArrayBase*>(object);
}


Object* FixedArray::get(int index) {
  SLOW_ASSERT(index >= 0 && index < this->length());
  return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}


Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) {
  return handle(array->get(index), array->GetIsolate());
}


bool FixedArray::is_the_hole(int index) {
  return get(index) == GetHeap()->the_hole_value();
}


void FixedArray::set(int index, Smi* value) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < this->length());
  ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(this, offset, value);
}


void FixedArray::set(int index, Object* value) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < this->length());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(this, offset, value);
  WRITE_BARRIER(GetHeap(), this, offset, value);
}


inline bool FixedDoubleArray::is_the_hole_nan(double value) {
  return BitCast<uint64_t, double>(value) == kHoleNanInt64;
}


inline double FixedDoubleArray::hole_nan_as_double() {
  return BitCast<double, uint64_t>(kHoleNanInt64);
}


inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
  ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
  ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
  return OS::nan_value();
}


double FixedDoubleArray::get_scalar(int index) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  ASSERT(index >= 0 && index < this->length());
  double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
  ASSERT(!is_the_hole_nan(result));
  return result;
}

int64_t FixedDoubleArray::get_representation(int index) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
         map() != GetHeap()->fixed_array_map());
  ASSERT(index >= 0 && index < this->length());
  return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
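
// For illustration: the hole is encoded as one specific NaN bit pattern
// (kHoleNanInt64), so is_the_hole_nan() must compare the raw 64 bits; an
// ordinary double comparison would be useless because NaN != NaN. A genuine
// NaN result is first canonicalized away from the hole pattern via
// canonical_not_the_hole_nan_as_double() before being stored (see set()
// below), so real NaNs can never be confused with holes.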
FixedDoubleArray::get(Handle
array, int index) { if (array->is_the_hole(index)) { return array->GetIsolate()->factory()->the_hole_value(); } else { return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index)); } } void FixedDoubleArray::set(int index, double value) { ASSERT(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double(); WRITE_DOUBLE_FIELD(this, offset, value); } void FixedDoubleArray::set_the_hole(int index) { ASSERT(map() != GetHeap()->fixed_cow_array_map() && map() != GetHeap()->fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double()); } bool FixedDoubleArray::is_the_hole(int index) { int offset = kHeaderSize + index * kDoubleSize; return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset)); } double* FixedDoubleArray::data_start() { return reinterpret_cast
(FIELD_ADDR(this, kHeaderSize)); } void FixedDoubleArray::FillWithHoles(int from, int to) { for (int i = from; i < to; i++) { set_the_hole(i); } } bool ConstantPoolArray::is_extended_layout() { uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); return IsExtendedField::decode(small_layout_1); } ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() { return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION; } int ConstantPoolArray::first_extended_section_index() { ASSERT(is_extended_layout()); uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); return TotalCountField::decode(small_layout_2); } int ConstantPoolArray::get_extended_section_header_offset() { return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size); } ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() { uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); return WeakObjectStateField::decode(small_layout_2); } void ConstantPoolArray::set_weak_object_state( ConstantPoolArray::WeakObjectState state) { uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); small_layout_2 = WeakObjectStateField::update(small_layout_2, state); WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2); } int ConstantPoolArray::first_index(Type type, LayoutSection section) { int index = 0; if (section == EXTENDED_SECTION) { ASSERT(is_extended_layout()); index += first_extended_section_index(); } for (Type type_iter = FIRST_TYPE; type_iter < type; type_iter = next_type(type_iter)) { index += number_of_entries(type_iter, section); } return index; } int ConstantPoolArray::last_index(Type type, LayoutSection section) { return first_index(type, section) + number_of_entries(type, section) - 1; } int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) { if (section == SMALL_SECTION) { uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset); switch (type) { case INT64: return Int64CountField::decode(small_layout_1); case CODE_PTR: return CodePtrCountField::decode(small_layout_1); case HEAP_PTR: return HeapPtrCountField::decode(small_layout_1); case INT32: return Int32CountField::decode(small_layout_2); default: UNREACHABLE(); return 0; } } else { ASSERT(section == EXTENDED_SECTION && is_extended_layout()); int offset = get_extended_section_header_offset(); switch (type) { case INT64: offset += kExtendedInt64CountOffset; break; case CODE_PTR: offset += kExtendedCodePtrCountOffset; break; case HEAP_PTR: offset += kExtendedHeapPtrCountOffset; break; case INT32: offset += kExtendedInt32CountOffset; break; default: UNREACHABLE(); } return READ_INT_FIELD(this, offset); } } ConstantPoolArray::Type ConstantPoolArray::get_type(int index) { LayoutSection section; if (is_extended_layout() && index >= first_extended_section_index()) { section = EXTENDED_SECTION; } else { section = SMALL_SECTION; } Type type = FIRST_TYPE; while (index > last_index(type, section)) { type = next_type(type); } ASSERT(type <= LAST_TYPE); return type; } int64_t ConstantPoolArray::get_int64_entry(int index) { ASSERT(map() == GetHeap()->constant_pool_array_map()); ASSERT(get_type(index) == INT64); return READ_INT64_FIELD(this, OffsetOfElementAt(index)); } double ConstantPoolArray::get_int64_entry_as_double(int index) { STATIC_ASSERT(kDoubleSize == kInt64Size); ASSERT(map() == GetHeap()->constant_pool_array_map()); ASSERT(get_type(index) == INT64); 
bool ConstantPoolArray::is_extended_layout() {
  uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
  return IsExtendedField::decode(small_layout_1);
}

ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() {
  return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION;
}

int ConstantPoolArray::first_extended_section_index() {
  ASSERT(is_extended_layout());
  uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
  return TotalCountField::decode(small_layout_2);
}

int ConstantPoolArray::get_extended_section_header_offset() {
  return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size);
}

ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
  uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
  return WeakObjectStateField::decode(small_layout_2);
}

void ConstantPoolArray::set_weak_object_state(
    ConstantPoolArray::WeakObjectState state) {
  uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
  small_layout_2 = WeakObjectStateField::update(small_layout_2, state);
  WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
}

int ConstantPoolArray::first_index(Type type, LayoutSection section) {
  int index = 0;
  if (section == EXTENDED_SECTION) {
    ASSERT(is_extended_layout());
    index += first_extended_section_index();
  }
  for (Type type_iter = FIRST_TYPE; type_iter < type;
       type_iter = next_type(type_iter)) {
    index += number_of_entries(type_iter, section);
  }
  return index;
}

int ConstantPoolArray::last_index(Type type, LayoutSection section) {
  return first_index(type, section) + number_of_entries(type, section) - 1;
}

int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) {
  if (section == SMALL_SECTION) {
    uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
    uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
    switch (type) {
      case INT64:
        return Int64CountField::decode(small_layout_1);
      case CODE_PTR:
        return CodePtrCountField::decode(small_layout_1);
      case HEAP_PTR:
        return HeapPtrCountField::decode(small_layout_1);
      case INT32:
        return Int32CountField::decode(small_layout_2);
      default:
        UNREACHABLE();
        return 0;
    }
  } else {
    ASSERT(section == EXTENDED_SECTION && is_extended_layout());
    int offset = get_extended_section_header_offset();
    switch (type) {
      case INT64:
        offset += kExtendedInt64CountOffset;
        break;
      case CODE_PTR:
        offset += kExtendedCodePtrCountOffset;
        break;
      case HEAP_PTR:
        offset += kExtendedHeapPtrCountOffset;
        break;
      case INT32:
        offset += kExtendedInt32CountOffset;
        break;
      default:
        UNREACHABLE();
    }
    return READ_INT_FIELD(this, offset);
  }
}

ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
  LayoutSection section;
  if (is_extended_layout() && index >= first_extended_section_index()) {
    section = EXTENDED_SECTION;
  } else {
    section = SMALL_SECTION;
  }
  Type type = FIRST_TYPE;
  while (index > last_index(type, section)) {
    type = next_type(type);
  }
  ASSERT(type <= LAST_TYPE);
  return type;
}

int64_t ConstantPoolArray::get_int64_entry(int index) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT64);
  return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}

double ConstantPoolArray::get_int64_entry_as_double(int index) {
  STATIC_ASSERT(kDoubleSize == kInt64Size);
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT64);
  return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}

Address ConstantPoolArray::get_code_ptr_entry(int index) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == CODE_PTR);
  return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
}

Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == HEAP_PTR);
  return READ_FIELD(this, OffsetOfElementAt(index));
}

int32_t ConstantPoolArray::get_int32_entry(int index) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT32);
  return READ_INT32_FIELD(this, OffsetOfElementAt(index));
}

void ConstantPoolArray::set(int index, int64_t value) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT64);
  WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
}

void ConstantPoolArray::set(int index, double value) {
  STATIC_ASSERT(kDoubleSize == kInt64Size);
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT64);
  WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
}

void ConstantPoolArray::set(int index, Address value) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == CODE_PTR);
  WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
}

void ConstantPoolArray::set(int index, Object* value) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == HEAP_PTR);
  WRITE_FIELD(this, OffsetOfElementAt(index), value);
  WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
}

void ConstantPoolArray::set(int index, int32_t value) {
  ASSERT(map() == GetHeap()->constant_pool_array_map());
  ASSERT(get_type(index) == INT32);
  WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
}

void ConstantPoolArray::Init(const NumberOfEntries& small) {
  uint32_t small_layout_1 =
      Int64CountField::encode(small.count_of(INT64)) |
      CodePtrCountField::encode(small.count_of(CODE_PTR)) |
      HeapPtrCountField::encode(small.count_of(HEAP_PTR)) |
      IsExtendedField::encode(false);
  uint32_t small_layout_2 =
      Int32CountField::encode(small.count_of(INT32)) |
      TotalCountField::encode(small.total_count()) |
      WeakObjectStateField::encode(NO_WEAK_OBJECTS);
  WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
  WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
  if (kHeaderSize != kFirstEntryOffset) {
    ASSERT(kFirstEntryOffset - kHeaderSize == kInt32Size);
    WRITE_UINT32_FIELD(this, kHeaderSize, 0);  // Zero out header padding.
  }
}

void ConstantPoolArray::InitExtended(const NumberOfEntries& small,
                                     const NumberOfEntries& extended) {
  // Initialize small layout fields first.
  Init(small);

  // Set is_extended_layout field.
  uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
  small_layout_1 = IsExtendedField::update(small_layout_1, true);
  WRITE_INT32_FIELD(this, kSmallLayout1Offset, small_layout_1);

  // Initialize the extended layout fields.
  int extended_header_offset = get_extended_section_header_offset();
  WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt64CountOffset,
                  extended.count_of(INT64));
  WRITE_INT_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset,
                  extended.count_of(CODE_PTR));
  WRITE_INT_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset,
                  extended.count_of(HEAP_PTR));
  WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt32CountOffset,
                  extended.count_of(INT32));
}

int ConstantPoolArray::size() {
  NumberOfEntries small(this, SMALL_SECTION);
  if (!is_extended_layout()) {
    return SizeFor(small);
  } else {
    NumberOfEntries extended(this, EXTENDED_SECTION);
    return SizeForExtended(small, extended);
  }
}

int ConstantPoolArray::length() {
  uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
  int length = TotalCountField::decode(small_layout_2);
  if (is_extended_layout()) {
    length += number_of_entries(INT64, EXTENDED_SECTION) +
              number_of_entries(CODE_PTR, EXTENDED_SECTION) +
              number_of_entries(HEAP_PTR, EXTENDED_SECTION) +
              number_of_entries(INT32, EXTENDED_SECTION);
  }
  return length;
}

int ConstantPoolArray::Iterator::next_index() {
  ASSERT(!is_finished());
  int ret = next_index_++;
  update_section();
  return ret;
}

bool ConstantPoolArray::Iterator::is_finished() {
  return next_index_ > array_->last_index(type_, final_section_);
}

void ConstantPoolArray::Iterator::update_section() {
  if (next_index_ > array_->last_index(type_, current_section_) &&
      current_section_ != final_section_) {
    ASSERT(final_section_ == EXTENDED_SECTION);
    current_section_ = EXTENDED_SECTION;
    next_index_ = array_->first_index(type_, EXTENDED_SECTION);
  }
}
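// Iteration sketch (hypothetical caller, assuming the Iterator(array, type)
// constructor declared in objects.h): entries of one type are visited in the
// small section first and then, for an extended layout, in the extended one:
//
//   ConstantPoolArray::Iterator it(constant_pool, ConstantPoolArray::INT64);
//   while (!it.is_finished()) {
//     int64_t entry = constant_pool->get_int64_entry(it.next_index());
//     ...
//   }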
WriteBarrierMode HeapObject::GetWriteBarrierMode(
    const DisallowHeapAllocation& promise) {
  Heap* heap = GetHeap();
  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
  return UPDATE_WRITE_BARRIER;
}

void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < this->length());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(this, offset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}

void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
                                              int index,
                                              Object* value) {
  ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < array->length());
  int offset = kHeaderSize + index * kPointerSize;
  WRITE_FIELD(array, offset, value);
  Heap* heap = array->GetHeap();
  if (heap->InNewSpace(value)) {
    heap->RecordWrite(array->address(), offset);
  }
}

void FixedArray::NoWriteBarrierSet(FixedArray* array,
                                   int index,
                                   Object* value) {
  ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < array->length());
  ASSERT(!array->GetHeap()->InNewSpace(value));
  WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}

void FixedArray::set_undefined(int index) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < this->length());
  ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->undefined_value());
}

void FixedArray::set_null(int index) {
  ASSERT(index >= 0 && index < this->length());
  ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->null_value());
}

void FixedArray::set_the_hole(int index) {
  ASSERT(map() != GetHeap()->fixed_cow_array_map());
  ASSERT(index >= 0 && index < this->length());
  ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
  WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
              GetHeap()->the_hole_value());
}

void FixedArray::FillWithHoles(int from, int to) {
  for (int i = from; i < to; i++) {
    set_the_hole(i);
  }
}

Object** FixedArray::data_start() {
  return HeapObject::RawField(this, kHeaderSize);
}

bool DescriptorArray::IsEmpty() {
  ASSERT(length() >= kFirstIndex ||
         this == GetHeap()->empty_descriptor_array());
  return length() < kFirstIndex;
}

void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
  WRITE_FIELD(
      this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
}
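// Note on the write-barrier fast path above (informal sketch, hypothetical
// caller): GetWriteBarrierMode() lets the barrier decision be hoisted out of
// a loop while allocation is provably disallowed:
//
//   DisallowHeapAllocation no_gc;
//   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
//   for (int i = 0; i < n; i++) array->set(i, value, mode);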
// Perform a binary search in a fixed array. Low and high are entry indices. If
// there are three entries in this array it should be called with low=0 and
// high=2.
template<SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) {
  uint32_t hash = name->Hash();
  int limit = high;

  ASSERT(low <= high);

  while (low != high) {
    int mid = (low + high) / 2;
    Name* mid_name = array->GetSortedKey(mid);
    uint32_t mid_hash = mid_name->Hash();

    if (mid_hash >= hash) {
      high = mid;
    } else {
      low = mid + 1;
    }
  }

  for (; low <= limit; ++low) {
    int sort_index = array->GetSortedKeyIndex(low);
    Name* entry = array->GetKey(sort_index);
    if (entry->Hash() != hash) break;
    if (entry->Equals(name)) {
      if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
        return sort_index;
      }
      return T::kNotFound;
    }
  }

  return T::kNotFound;
}
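// Reading note (informal): the loop above is a lower-bound search on the key
// hashes, and the trailing scan resolves hash collisions by comparing actual
// names. A stand-alone sketch of the same lower-bound step (hypothetical
// hash_at helper):
//
//   while (low != high) {
//     int mid = (low + high) / 2;
//     if (hash_at(mid) >= hash) high = mid; else low = mid + 1;
//   }
//   // 'low' now points at the first entry whose hash is >= the probe hash.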
// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
template<SearchMode search_mode, typename T>
int LinearSearch(T* array, Name* name, int len, int valid_entries) {
  uint32_t hash = name->Hash();
  if (search_mode == ALL_ENTRIES) {
    for (int number = 0; number < len; number++) {
      int sorted_index = array->GetSortedKeyIndex(number);
      Name* entry = array->GetKey(sorted_index);
      uint32_t current_hash = entry->Hash();
      if (current_hash > hash) break;
      if (current_hash == hash && entry->Equals(name)) return sorted_index;
    }
  } else {
    ASSERT(len >= valid_entries);
    for (int number = 0; number < valid_entries; number++) {
      Name* entry = array->GetKey(number);
      uint32_t current_hash = entry->Hash();
      if (current_hash == hash && entry->Equals(name)) return number;
    }
  }
  return T::kNotFound;
}
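// Reading note (informal): in ALL_ENTRIES mode the scan walks keys in hash
// order and can stop early once current_hash > hash; in VALID_ENTRIES mode
// only the first valid_entries slots are meaningful, so the scan is bounded
// by that count and cannot use the hash-order early-out.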
template<SearchMode search_mode, typename T>
int Search(T* array, Name* name, int valid_entries) {
  if (search_mode == VALID_ENTRIES) {
    SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
  } else {
    SLOW_ASSERT(array->IsSortedNoDuplicates());
  }

  int nof = array->number_of_entries();
  if (nof == 0) return T::kNotFound;

  // Fast case: do linear search for small arrays.
  const int kMaxElementsForLinearSearch = 8;
  if ((search_mode == ALL_ENTRIES &&
       nof <= kMaxElementsForLinearSearch) ||
      (search_mode == VALID_ENTRIES &&
       valid_entries <= (kMaxElementsForLinearSearch * 3))) {
    return LinearSearch<search_mode>(array, name, nof, valid_entries);
  }

  // Slow case: perform binary search.
  return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries);
}
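// Design note (informal): for very small arrays a linear scan tends to beat
// binary search because it avoids the sorted-key indirection; the 8 and
// 8 * 3 cutoffs above encode that trade-off. A hypothetical caller never
// picks the strategy itself, e.g.:
//
//   int idx = Search<ALL_ENTRIES>(descriptors, name, 0);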
int DescriptorArray::Search(Name* name, int valid_descriptors) {
  return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
}

int DescriptorArray::SearchWithCache(Name* name, Map* map) {
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors == 0) return kNotFound;

  DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
  int number = cache->Lookup(map, name);

  if (number == DescriptorLookupCache::kAbsent) {
    number = Search(name, number_of_own_descriptors);
    cache->Update(map, name, number);
  }

  return number;
}

PropertyDetails Map::GetLastDescriptorDetails() {
  return instance_descriptors()->GetDetails(LastAdded());
}

void Map::LookupDescriptor(JSObject* holder,
                           Name* name,
                           LookupResult* result) {
  DescriptorArray* descriptors = this->instance_descriptors();
  int number = descriptors->SearchWithCache(name, this);
  if (number == DescriptorArray::kNotFound) return result->NotFound();
  result->DescriptorResult(holder, descriptors->GetDetails(number), number);
}

void Map::LookupTransition(JSObject* holder,
                           Name* name,
                           LookupResult* result) {
  int transition_index = this->SearchTransition(name);
  if (transition_index == TransitionArray::kNotFound) return result->NotFound();
  result->TransitionResult(holder, this->GetTransition(transition_index));
}

FixedArrayBase* Map::GetInitialElements() {
  if (has_fast_smi_or_object_elements() ||
      has_fast_double_elements()) {
    ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
    return GetHeap()->empty_fixed_array();
  } else if (has_external_array_elements()) {
    ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this);
    ASSERT(!GetHeap()->InNewSpace(empty_array));
    return empty_array;
  } else if (has_fixed_typed_array_elements()) {
    FixedTypedArrayBase* empty_array =
        GetHeap()->EmptyFixedTypedArrayForMap(this);
    ASSERT(!GetHeap()->InNewSpace(empty_array));
    return empty_array;
  } else if (has_dictionary_elements()) {
    ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_slow_element_dictionary()));
    return GetHeap()->empty_slow_element_dictionary();
  } else {
    UNREACHABLE();
  }
  return NULL;
}

Object** DescriptorArray::GetKeySlot(int descriptor_number) {
  ASSERT(descriptor_number < number_of_descriptors());
  return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}

Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
  return GetKeySlot(descriptor_number);
}

Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
  return GetValueSlot(descriptor_number - 1) + 1;
}

Name* DescriptorArray::GetKey(int descriptor_number) {
  ASSERT(descriptor_number < number_of_descriptors());
  return Name::cast(get(ToKeyIndex(descriptor_number)));
}

int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
  return GetDetails(descriptor_number).pointer();
}

Name* DescriptorArray::GetSortedKey(int descriptor_number) {
  return GetKey(GetSortedKeyIndex(descriptor_number));
}

void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
  PropertyDetails details = GetDetails(descriptor_index);
  set(ToDetailsIndex(descriptor_index), details.set_pointer(pointer).AsSmi());
}

void DescriptorArray::SetRepresentation(int descriptor_index,
                                        Representation representation) {
  ASSERT(!representation.IsNone());
  PropertyDetails details = GetDetails(descriptor_index);
  set(ToDetailsIndex(descriptor_index),
      details.CopyWithRepresentation(representation).AsSmi());
}

Object** DescriptorArray::GetValueSlot(int descriptor_number) {
  ASSERT(descriptor_number < number_of_descriptors());
  return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}

Object* DescriptorArray::GetValue(int descriptor_number) {
  ASSERT(descriptor_number < number_of_descriptors());
  return get(ToValueIndex(descriptor_number));
}

void DescriptorArray::SetValue(int descriptor_index, Object* value) {
  set(ToValueIndex(descriptor_index), value);
}

PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
  ASSERT(descriptor_number < number_of_descriptors());
  Object* details = get(ToDetailsIndex(descriptor_number));
  return PropertyDetails(Smi::cast(details));
}

PropertyType DescriptorArray::GetType(int descriptor_number) {
  return GetDetails(descriptor_number).type();
}

int DescriptorArray::GetFieldIndex(int descriptor_number) {
  ASSERT(GetDetails(descriptor_number).type() == FIELD);
  return GetDetails(descriptor_number).field_index();
}

HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
  ASSERT(GetDetails(descriptor_number).type() == FIELD);
  return HeapType::cast(GetValue(descriptor_number));
}

Object* DescriptorArray::GetConstant(int descriptor_number) {
  return GetValue(descriptor_number);
}

Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
  ASSERT(GetType(descriptor_number) == CALLBACKS);
  return GetValue(descriptor_number);
}

AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
  ASSERT(GetType(descriptor_number) == CALLBACKS);
  Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}

void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
  desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
             handle(GetValue(descriptor_number), GetIsolate()),
             GetDetails(descriptor_number));
}

void DescriptorArray::Set(int descriptor_number,
                          Descriptor* desc,
                          const WhitenessWitness&) {
  // Range check.
  ASSERT(descriptor_number < number_of_descriptors());

  NoIncrementalWriteBarrierSet(this,
                               ToKeyIndex(descriptor_number),
                               *desc->GetKey());
  NoIncrementalWriteBarrierSet(this,
                               ToValueIndex(descriptor_number),
                               *desc->GetValue());
  NoIncrementalWriteBarrierSet(this,
                               ToDetailsIndex(descriptor_number),
                               desc->GetDetails().AsSmi());
}

void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
  // Range check.
  ASSERT(descriptor_number < number_of_descriptors());

  set(ToKeyIndex(descriptor_number), *desc->GetKey());
  set(ToValueIndex(descriptor_number), *desc->GetValue());
  set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}

void DescriptorArray::Append(Descriptor* desc,
                             const WhitenessWitness& witness) {
  DisallowHeapAllocation no_gc;
  int descriptor_number = number_of_descriptors();
  SetNumberOfDescriptors(descriptor_number + 1);
  Set(descriptor_number, desc, witness);

  uint32_t hash = desc->GetKey()->Hash();

  int insertion;

  for (insertion = descriptor_number; insertion > 0; --insertion) {
    Name* key = GetSortedKey(insertion - 1);
    if (key->Hash() <= hash) break;
    SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
  }

  SetSortedKey(insertion, descriptor_number);
}

void DescriptorArray::Append(Descriptor* desc) {
  DisallowHeapAllocation no_gc;
  int descriptor_number = number_of_descriptors();
  SetNumberOfDescriptors(descriptor_number + 1);
  Set(descriptor_number, desc);

  uint32_t hash = desc->GetKey()->Hash();

  int insertion;

  for (insertion = descriptor_number; insertion > 0; --insertion) {
    Name* key = GetSortedKey(insertion - 1);
    if (key->Hash() <= hash) break;
    SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
  }

  SetSortedKey(insertion, descriptor_number);
}

void DescriptorArray::SwapSortedKeys(int first, int second) {
  int first_key = GetSortedKeyIndex(first);
  SetSortedKey(first, GetSortedKeyIndex(second));
  SetSortedKey(second, first_key);
}

DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
    : marking_(array->GetHeap()->incremental_marking()) {
  marking_->EnterNoMarkingScope();
  ASSERT(!marking_->IsMarking() ||
         Marking::Color(array) == Marking::WHITE_OBJECT);
}

DescriptorArray::WhitenessWitness::~WhitenessWitness() {
  marking_->LeaveNoMarkingScope();
}
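// Informal note: Append() stores the new descriptor at the end and then runs
// one pass of insertion sort over the side table of sorted-key indices, so
// descriptors stay in insertion order while searches see hash order. A
// hypothetical cached lookup then goes through SearchWithCache():
//
//   int number = descriptors->SearchWithCache(name, map);
//   if (number != DescriptorArray::kNotFound) {
//     PropertyDetails details = descriptors->GetDetails(number);
//     ...
//   }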
template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) {
  const int kMinCapacity = 32;
  int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
  if (capacity < kMinCapacity) {
    capacity = kMinCapacity;  // Guarantee min capacity.
  }
  return capacity;
}

template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
  return FindEntry(GetIsolate(), key);
}

// Find entry for key otherwise return kNotFound.
template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
  uint32_t capacity = Capacity();
  uint32_t entry = FirstProbe(HashTable::Hash(key), capacity);
  uint32_t count = 1;
  // EnsureCapacity will guarantee the hash table is never full.
  while (true) {
    Object* element = KeyAt(entry);
    // Empty entry. Uses raw unchecked accessors because it is called by the
    // string table during bootstrapping.
    if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
    if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
        Shape::IsMatch(key, element)) return entry;
    entry = NextProbe(entry, count++, capacity);
  }
  return kNotFound;
}
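// Probe-sequence sketch (informal, from the FirstProbe/NextProbe helpers
// declared elsewhere in this header): the loop above does open addressing
// over a power-of-two table, touching slots roughly like
//
//   entry_0 = hash & (capacity - 1)
//   entry_i = (entry_{i-1} + i) & (capacity - 1)
//
// and terminates because EnsureCapacity keeps at least one undefined slot.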
bool SeededNumberDictionary::requires_slow_elements() {
  Object* max_index_object = get(kMaxNumberKeyIndex);
  if (!max_index_object->IsSmi()) return false;
  return 0 !=
      (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
}

uint32_t SeededNumberDictionary::max_number_key() {
  ASSERT(!requires_slow_elements());
  Object* max_index_object = get(kMaxNumberKeyIndex);
  if (!max_index_object->IsSmi()) return 0;
  uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
  return value >> kRequiresSlowElementsTagSize;
}

void SeededNumberDictionary::set_requires_slow_elements() {
  set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
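// Bit-layout note (informal sketch of the encoding read above): the Smi at
// kMaxNumberKeyIndex packs the largest numeric key shifted left by
// kRequiresSlowElementsTagSize, with the low tag bits doubling as the
// "requires slow elements" flag:
//
//   stored = (max_key << kRequiresSlowElementsTagSize) | flag_bits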
// ------------------------------------
// Cast operations

CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalInt8Array)
CAST_ACCESSOR(ExternalUint8Array)
CAST_ACCESSOR(ExternalInt16Array)
CAST_ACCESSOR(ExternalUint16Array)
CAST_ACCESSOR(ExternalInt32Array)
CAST_ACCESSOR(ExternalUint32Array)
CAST_ACCESSOR(ExternalFloat32Array)
CAST_ACCESSOR(ExternalFloat64Array)
CAST_ACCESSOR(ExternalUint8ClampedArray)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(AccessorInfo)

template<class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
  SLOW_ASSERT(object->IsHeapObject() &&
              HeapObject::cast(object)->map()->instance_type() ==
                  Traits::kInstanceType);
  return reinterpret_cast<FixedTypedArray<Traits>*>(object);
}

#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
template<typename Derived, typename Shape, typename Key>
HashTable<Derived, Shape, Key>*
HashTable<Derived, Shape, Key>::cast(Object* obj) {
  ASSERT(obj->IsHashTable());
  return reinterpret_cast<HashTable*>(obj);
}

SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)

SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)

SMI_ACCESSORS(String, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)

uint32_t Name::hash_field() {
  return READ_UINT32_FIELD(this, kHashFieldOffset);
}

void Name::set_hash_field(uint32_t value) {
  WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
  WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
#endif
}

bool Name::Equals(Name* other) {
  if (other == this) return true;
  if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
      this->IsSymbol() || other->IsSymbol()) {
    return false;
  }
  return String::cast(this)->SlowEquals(String::cast(other));
}

bool Name::Equals(Handle<Name> one, Handle<Name> two) {
  if (one.is_identical_to(two)) return true;
  if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
      one->IsSymbol() || two->IsSymbol()) {
    return false;
  }
  return String::SlowEquals(Handle<String>::cast(one),
                            Handle<String>::cast(two));
}

ACCESSORS(Symbol, name, Object, kNameOffset)
ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)

bool String::Equals(String* other) {
  if (other == this) return true;
  if (this->IsInternalizedString() && other->IsInternalizedString()) {
    return false;
  }
  return SlowEquals(other);
}

bool String::Equals(Handle<String> one, Handle<String> two) {
  if (one.is_identical_to(two)) return true;
  if (one->IsInternalizedString() && two->IsInternalizedString()) {
    return false;
  }
  return SlowEquals(one, two);
}

Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
  if (!string->IsConsString()) return string;
  Handle<ConsString> cons = Handle<ConsString>::cast(string);
  if (cons->IsFlat()) return handle(cons->first());
  return SlowFlatten(cons, pretenure);
}

uint16_t String::Get(int index) {
  ASSERT(index >= 0 && index < length());
  switch (StringShape(this).full_representation_tag()) {
    case kSeqStringTag | kOneByteStringTag:
      return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
    case kSeqStringTag | kTwoByteStringTag:
      return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
    case kConsStringTag | kOneByteStringTag:
    case kConsStringTag | kTwoByteStringTag:
      return ConsString::cast(this)->ConsStringGet(index);
    case kExternalStringTag | kOneByteStringTag:
      return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
    case kExternalStringTag | kTwoByteStringTag:
      return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
    case kSlicedStringTag | kOneByteStringTag:
    case kSlicedStringTag | kTwoByteStringTag:
      return SlicedString::cast(this)->SlicedStringGet(index);
    default:
      break;
  }

  UNREACHABLE();
  return 0;
}

void String::Set(int index, uint16_t value) {
  ASSERT(index >= 0 && index < length());
  ASSERT(StringShape(this).IsSequential());

  return this->IsOneByteRepresentation()
      ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
      : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}

bool String::IsFlat() {
  if (!StringShape(this).IsCons()) return true;
  return ConsString::cast(this)->second()->length() == 0;
}

String* String::GetUnderlying() {
  // Giving direct access to underlying string only makes sense if the
  // wrapping string is already flattened.
  ASSERT(this->IsFlat());
  ASSERT(StringShape(this).IsIndirect());
  STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
  const int kUnderlyingOffset = SlicedString::kParentOffset;
  return String::cast(READ_FIELD(this, kUnderlyingOffset));
}
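// Usage sketch (hypothetical caller): repeated Get() on a ConsString walks
// the rope on every call, so code that indexes many characters typically
// flattens first:
//
//   string = String::Flatten(string);  // no-op for already-flat strings
//   for (int i = 0; i < string->length(); i++) {
//     uint16_t c = string->Get(i);
//     ...
//   }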
template<typename Visitor>
ConsString* String::VisitFlat(Visitor* visitor,
                              String* string,
                              const int offset) {
  int slice_offset = offset;
  const int length = string->length();
  ASSERT(offset <= length);
  while (true) {
    int32_t type = string->map()->instance_type();
    switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
      case kSeqStringTag | kOneByteStringTag:
        visitor->VisitOneByteString(
            SeqOneByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kSeqStringTag | kTwoByteStringTag:
        visitor->VisitTwoByteString(
            SeqTwoByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kExternalStringTag | kOneByteStringTag:
        visitor->VisitOneByteString(
            ExternalAsciiString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kExternalStringTag | kTwoByteStringTag:
        visitor->VisitTwoByteString(
            ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
            length - offset);
        return NULL;

      case kSlicedStringTag | kOneByteStringTag:
      case kSlicedStringTag | kTwoByteStringTag: {
        SlicedString* slicedString = SlicedString::cast(string);
        slice_offset += slicedString->offset();
        string = slicedString->parent();
        continue;
      }

      case kConsStringTag | kOneByteStringTag:
      case kConsStringTag | kTwoByteStringTag:
        return ConsString::cast(string);

      default:
        UNREACHABLE();
        return NULL;
    }
  }
}
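// Visitor sketch (hypothetical type, assuming only the two callbacks invoked
// above): VisitFlat dispatches flat content to the visitor and returns a
// ConsString that the caller must handle itself:
//
//   struct LengthCounter {
//     LengthCounter() : chars(0) {}
//     void VisitOneByteString(const uint8_t* c, int len) { chars += len; }
//     void VisitTwoByteString(const uint16_t* c, int len) { chars += len; }
//     int chars;
//   };
//   LengthCounter counter;
//   ConsString* cons = String::VisitFlat(&counter, string);
//   // cons != NULL means 'string' was a rope and was not visited.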
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}

void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
  ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
  WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
                   static_cast<byte>(value));
}

Address SeqOneByteString::GetCharsAddress() {
  return FIELD_ADDR(this, kHeaderSize);
}

uint8_t* SeqOneByteString::GetChars() {
  return reinterpret_cast<uint8_t*>(GetCharsAddress());
}

Address SeqTwoByteString::GetCharsAddress() {
  return FIELD_ADDR(this, kHeaderSize);
}

uc16* SeqTwoByteString::GetChars() {
  return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
}

uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
}

void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
  ASSERT(index >= 0 && index < length());
  WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
}

int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
  return SizeFor(length());
}

int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
  return SizeFor(length());
}

String* SlicedString::parent() {
  return String::cast(READ_FIELD(this, kParentOffset));
}

void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
  ASSERT(parent->IsSeqString() || parent->IsExternalString());
  WRITE_FIELD(this, kParentOffset, parent);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}

SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)

String* ConsString::first() {
  return String::cast(READ_FIELD(this, kFirstOffset));
}

Object* ConsString::unchecked_first() {
  return READ_FIELD(this, kFirstOffset);
}

void ConsString::set_first(String* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kFirstOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
}

String* ConsString::second() {
  return String::cast(READ_FIELD(this, kSecondOffset));
}

Object* ConsString::unchecked_second() {
  return READ_FIELD(this, kSecondOffset);
}

void ConsString::set_second(String* value, WriteBarrierMode mode) {
  WRITE_FIELD(this, kSecondOffset, value);
  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
}

bool ExternalString::is_short() {
  InstanceType type = map()->instance_type();
  return (type & kShortExternalStringMask) == kShortExternalStringTag;
}

const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}

void ExternalAsciiString::update_data_cache() {
  if (is_short()) return;
  const char** data_field =
      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
  *data_field = resource()->data();
}

void ExternalAsciiString::set_resource(
    const ExternalAsciiString::Resource* resource) {
  ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
  *reinterpret_cast<const Resource**>(
      FIELD_ADDR(this, kResourceOffset)) = resource;
  if (resource != NULL) update_data_cache();
}

const uint8_t* ExternalAsciiString::GetChars() {
  return reinterpret_cast<const uint8_t*>(resource()->data());
}

uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return GetChars()[index];
}

const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
  return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}

void ExternalTwoByteString::update_data_cache() {
  if (is_short()) return;
  const uint16_t** data_field =
      reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
  *data_field = resource()->data();
}

void ExternalTwoByteString::set_resource(
    const ExternalTwoByteString::Resource* resource) {
  *reinterpret_cast<const Resource**>(
      FIELD_ADDR(this, kResourceOffset)) = resource;
  if (resource != NULL) update_data_cache();
}

const uint16_t* ExternalTwoByteString::GetChars() {
  return resource()->data();
}

uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
  ASSERT(index >= 0 && index < length());
  return GetChars()[index];
}

const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
    unsigned start) {
  return GetChars() + start;
}

int ConsStringIteratorOp::OffsetForDepth(int depth) {
  return depth & kDepthMask;
}

void ConsStringIteratorOp::PushLeft(ConsString* string) {
  frames_[depth_++ & kDepthMask] = string;
}

void ConsStringIteratorOp::PushRight(ConsString* string) {
  // Inplace update.
  frames_[(depth_-1) & kDepthMask] = string;
}

void ConsStringIteratorOp::AdjustMaximumDepth() {
  if (depth_ > maximum_depth_) maximum_depth_ = depth_;
}

void ConsStringIteratorOp::Pop() {
  ASSERT(depth_ > 0);
  ASSERT(depth_ <= maximum_depth_);
  depth_--;
}

uint16_t StringCharacterStream::GetNext() {
  ASSERT(buffer8_ != NULL && end_ != NULL);
  // Advance cursor if needed.
  if (buffer8_ == end_) HasMore();
  ASSERT(buffer8_ < end_);
  return is_one_byte_ ? *buffer8_++ : *buffer16_++;
}

StringCharacterStream::StringCharacterStream(String* string,
                                             ConsStringIteratorOp* op,
                                             int offset)
  : is_one_byte_(false),
    op_(op) {
  Reset(string, offset);
}

void StringCharacterStream::Reset(String* string, int offset) {
  buffer8_ = NULL;
  end_ = NULL;
  ConsString* cons_string = String::VisitFlat(this, string, offset);
  op_->Reset(cons_string, offset);
  if (cons_string != NULL) {
    string = op_->Next(&offset);
    if (string != NULL) String::VisitFlat(this, string, offset);
  }
}

bool StringCharacterStream::HasMore() {
  if (buffer8_ != end_) return true;
  int offset;
  String* string = op_->Next(&offset);
  ASSERT_EQ(offset, 0);
  if (string == NULL) return false;
  String::VisitFlat(this, string);
  ASSERT(buffer8_ != end_);
  return true;
}
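// Usage sketch (hypothetical caller): StringCharacterStream hides both the
// one-byte / two-byte split and the cons-string traversal:
//
//   ConsStringIteratorOp op;
//   StringCharacterStream stream(string, &op);
//   while (stream.HasMore()) {
//     uint16_t c = stream.GetNext();
//     ...
//   }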
void StringCharacterStream::VisitOneByteString(
    const uint8_t* chars, int length) {
  is_one_byte_ = true;
  buffer8_ = chars;
  end_ = chars + length;
}

void StringCharacterStream::VisitTwoByteString(
    const uint16_t* chars, int length) {
  is_one_byte_ = false;
  buffer16_ = chars;
  end_ = reinterpret_cast<const uint8_t*>(chars + length);
}

void JSFunctionResultCache::MakeZeroSize() {
  set_finger_index(kEntriesIndex);
  set_size(kEntriesIndex);
}

void JSFunctionResultCache::Clear() {
  int cache_size = size();
  Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
  MemsetPointer(entries_start,
                GetHeap()->the_hole_value(),
                cache_size - kEntriesIndex);
  MakeZeroSize();
}

int JSFunctionResultCache::size() {
  return Smi::cast(get(kCacheSizeIndex))->value();
}

void JSFunctionResultCache::set_size(int size) {
  set(kCacheSizeIndex, Smi::FromInt(size));
}

int JSFunctionResultCache::finger_index() {
  return Smi::cast(get(kFingerIndex))->value();
}

void JSFunctionResultCache::set_finger_index(int finger_index) {
  set(kFingerIndex, Smi::FromInt(finger_index));
}

byte ByteArray::get(int index) {
  ASSERT(index >= 0 && index < this->length());
  return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}

void ByteArray::set(int index, byte value) {
  ASSERT(index >= 0 && index < this->length());
  WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
}

int ByteArray::get_int(int index) {
  ASSERT(index >= 0 && (index * kIntSize) < this->length());
  return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
}

ByteArray* ByteArray::FromDataStartAddress(Address address) {
  ASSERT_TAG_ALIGNED(address);
  return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
}

Address ByteArray::GetDataStartAddress() {
  return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
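// Address-arithmetic note (informal): GetDataStartAddress() and
// FromDataStartAddress() are inverses. A HeapObject pointer carries
// kHeapObjectTag in its low bits, so:
//
//   data = raw(this) - kHeapObjectTag + kHeaderSize
//   this = raw(data) - kHeaderSize + kHeapObjectTag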
uint8_t* ExternalUint8ClampedArray::external_uint8_clamped_pointer() {
  return reinterpret_cast<uint8_t*>(external_pointer());
}

uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  uint8_t* ptr = external_uint8_clamped_pointer();
  return ptr[index];
}

Handle<Object> ExternalUint8ClampedArray::get(
    Handle<ExternalUint8ClampedArray> array,
    int index) {
  return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
                     array->GetIsolate());
}

void ExternalUint8ClampedArray::set(int index, uint8_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  uint8_t* ptr = external_uint8_clamped_pointer();
  ptr[index] = value;
}

void* ExternalArray::external_pointer() {
  intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
  return reinterpret_cast<void*>(ptr);
}
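// Access-pattern note (informal): every External*Array accessor below follows
// the same shape, differing only in the element C type T and in how the
// result is boxed (Smi for the small integer types, heap number for
// int32/uint32):
//
//   T* ptr = static_cast<T*>(external_pointer());
//   return ptr[index];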
void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
  intptr_t ptr = reinterpret_cast<intptr_t>(value);
  WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
}

int8_t ExternalInt8Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  int8_t* ptr = static_cast<int8_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalInt8Array::get(Handle<ExternalInt8Array> array,
                                      int index) {
  return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
                     array->GetIsolate());
}

void ExternalInt8Array::set(int index, int8_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  int8_t* ptr = static_cast<int8_t*>(external_pointer());
  ptr[index] = value;
}

uint8_t ExternalUint8Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalUint8Array::get(Handle<ExternalUint8Array> array,
                                       int index) {
  return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
                     array->GetIsolate());
}

void ExternalUint8Array::set(int index, uint8_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
  ptr[index] = value;
}

int16_t ExternalInt16Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  int16_t* ptr = static_cast<int16_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalInt16Array::get(Handle<ExternalInt16Array> array,
                                       int index) {
  return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
                     array->GetIsolate());
}

void ExternalInt16Array::set(int index, int16_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  int16_t* ptr = static_cast<int16_t*>(external_pointer());
  ptr[index] = value;
}

uint16_t ExternalUint16Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalUint16Array::get(Handle<ExternalUint16Array> array,
                                        int index) {
  return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
                     array->GetIsolate());
}

void ExternalUint16Array::set(int index, uint16_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
  ptr[index] = value;
}

int32_t ExternalInt32Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  int32_t* ptr = static_cast<int32_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalInt32Array::get(Handle<ExternalInt32Array> array,
                                       int index) {
  return array->GetIsolate()->factory()->
      NewNumberFromInt(array->get_scalar(index));
}

void ExternalInt32Array::set(int index, int32_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  int32_t* ptr = static_cast<int32_t*>(external_pointer());
  ptr[index] = value;
}

uint32_t ExternalUint32Array::get_scalar(int index) {
  ASSERT((index >= 0) && (index < this->length()));
  uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
  return ptr[index];
}

Handle<Object> ExternalUint32Array::get(Handle<ExternalUint32Array> array,
                                        int index) {
  return array->GetIsolate()->factory()->
      NewNumberFromUint(array->get_scalar(index));
}

void ExternalUint32Array::set(int index, uint32_t value) {
  ASSERT((index >= 0) && (index < this->length()));
  uint32_t* ptr = static_cast<uint32_t*>