code = isolate->factory()->NewCode( desc, Code::ComputeFlags(Code::STUB), Handle()); CHECK(code->IsCode()); HeapObject* obj = HeapObject::cast(*code); Address obj_addr = obj->address(); for (int i = 0; i < obj->Size(); i += kPointerSize) { Object* found = isolate->FindCodeObject(obj_addr + i); CHECK_EQ(*code, found); } Handle copy = isolate->factory()->NewCode( desc, Code::ComputeFlags(Code::STUB), Handle()); HeapObject* obj_copy = HeapObject::cast(*copy); Object* not_right = isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2); CHECK(not_right != *code); } TEST(HandleNull) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); HandleScope outer_scope(isolate); LocalContext context; Handle n(static_cast(nullptr), isolate); CHECK(!n.is_null()); } TEST(HeapObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); HandleScope sc(isolate); Handle value = factory->NewNumber(1.000123); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(1.000123, value->Number()); value = factory->NewNumber(1.0); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1.0, value->Number()); value = factory->NewNumberFromInt(1024); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1024.0, value->Number()); value = factory->NewNumberFromInt(Smi::kMinValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMinValue, Handle::cast(value)->value()); value = factory->NewNumberFromInt(Smi::kMaxValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMaxValue, Handle::cast(value)->value()); #if !defined(V8_TARGET_ARCH_64_BIT) // TODO(lrn): We need a NumberFromIntptr function in order to test this. 
value = factory->NewNumberFromInt(Smi::kMinValue - 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(Smi::kMinValue - 1), value->Number()); #endif value = factory->NewNumberFromUint(static_cast(Smi::kMaxValue) + 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(Smi::kMaxValue) + 1), value->Number()); value = factory->NewNumberFromUint(static_cast(1) << 31); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(1) << 31), value->Number()); // nan oddball checks CHECK(factory->nan_value()->IsNumber()); CHECK(std::isnan(factory->nan_value()->Number())); Handle s = factory->NewStringFromStaticChars("fisk hest "); CHECK(s->IsString()); CHECK_EQ(10, s->length()); Handle object_string = Handle::cast(factory->Object_string()); Handle global( CcTest::i_isolate()->context()->global_object()); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string)); // Check ToString for oddballs CheckOddball(isolate, heap->true_value(), "true"); CheckOddball(isolate, heap->false_value(), "false"); CheckOddball(isolate, heap->null_value(), "null"); CheckOddball(isolate, heap->undefined_value(), "undefined"); // Check ToString for Smis CheckSmi(isolate, 0, "0"); CheckSmi(isolate, 42, "42"); CheckSmi(isolate, -42, "-42"); // Check ToString for Numbers CheckNumber(isolate, 1.1, "1.1"); CheckFindCodeObject(isolate); } template static void CheckSimdValue(T* value, LANE_TYPE lane_values[LANES], LANE_TYPE other_value) { // Check against lane_values, and check that all lanes can be set to // other_value without disturbing the other lanes. 
for (int i = 0; i < LANES; i++) { CHECK_EQ(lane_values[i], value->get_lane(i)); } for (int i = 0; i < LANES; i++) { value->set_lane(i, other_value); // change the value for (int j = 0; j < LANES; j++) { if (i != j) CHECK_EQ(lane_values[j], value->get_lane(j)); else CHECK_EQ(other_value, value->get_lane(j)); } value->set_lane(i, lane_values[i]); // restore the lane } CHECK(value->BooleanValue()); // SIMD values are 'true'. } TEST(SimdObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Float32x4 { float lanes[4] = {1, 2, 3, 4}; float quiet_NaN = std::numeric_limits::quiet_NaN(); float signaling_NaN = std::numeric_limits::signaling_NaN(); Handle value = factory->NewFloat32x4(lanes); CHECK(value->IsFloat32x4()); CheckSimdValue(*value, lanes, 3.14f); // Check special lane values. value->set_lane(1, -0.0); CHECK_EQ(-0.0f, value->get_lane(1)); CHECK(std::signbit(value->get_lane(1))); // Sign bit should be preserved. value->set_lane(2, quiet_NaN); CHECK(std::isnan(value->get_lane(2))); value->set_lane(3, signaling_NaN); CHECK(std::isnan(value->get_lane(3))); #ifdef OBJECT_PRINT // Check value printing. { value = factory->NewFloat32x4(lanes); std::ostringstream os; value->Float32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); } { float special_lanes[4] = {0, -0.0, quiet_NaN, signaling_NaN}; value = factory->NewFloat32x4(special_lanes); std::ostringstream os; value->Float32x4Print(os); // Value printing doesn't preserve signed zeroes. 
CHECK_EQ("0, 0, NaN, NaN", os.str()); } #endif // OBJECT_PRINT } // Int32x4 { int32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewInt32x4(lanes); CHECK(value->IsInt32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Int32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Uint32x4 { uint32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewUint32x4(lanes); CHECK(value->IsUint32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Bool32x4 { bool lanes[4] = {true, false, true, false}; Handle value = factory->NewBool32x4(lanes); CHECK(value->IsBool32x4()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool32x4Print(os); CHECK_EQ("true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int16x8 { int16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewInt16x8(lanes); CHECK(value->IsInt16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Int16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Uint16x8 { uint16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewUint16x8(lanes); CHECK(value->IsUint16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Bool16x8 { bool lanes[8] = {true, false, true, false, true, false, true, false}; Handle value = factory->NewBool16x8(lanes); CHECK(value->IsBool16x8()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool16x8Print(os); CHECK_EQ("true, false, true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int8x16 { int8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; 
Handle value = factory->NewInt8x16(lanes); CHECK(value->IsInt8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Int8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Uint8x16 { uint8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; Handle value = factory->NewUint8x16(lanes); CHECK(value->IsUint8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Bool8x16 { bool lanes[16] = {true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}; Handle value = factory->NewBool8x16(lanes); CHECK(value->IsBool8x16()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool8x16Print(os); CHECK_EQ( "true, false, true, false, true, false, true, false, true, false, " "true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } } TEST(Tagging) { CcTest::InitializeVM(); int request = 24; CHECK_EQ(request, static_cast(OBJECT_POINTER_ALIGN(request))); CHECK(Smi::FromInt(42)->IsSmi()); CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi()); CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi()); } TEST(GarbageCollection) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Check GC. 
heap->CollectGarbage(NEW_SPACE); Handle global( CcTest::i_isolate()->context()->global_object()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle prop_namex = factory->InternalizeUtf8String("theSlotx"); Handle obj_name = factory->InternalizeUtf8String("theObject"); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); { HandleScope inner_scope(isolate); // Allocate a function and keep it in global object's property. Handle function = factory->NewFunction(name); JSReceiver::SetProperty(global, name, function, SLOPPY).Check(); // Allocate an object. Unrooted after leaving the scope. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(obj, prop_namex).ToHandleChecked()); } heap->CollectGarbage(NEW_SPACE); // Function should be alive. CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name)); // Check function is retained. Handle func_value = Object::GetProperty(global, name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); { HandleScope inner_scope(isolate); // Allocate another object, make it reachable from global. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); } // After gc, it should survive. 
heap->CollectGarbage(NEW_SPACE); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name)); Handle obj = Object::GetProperty(global, obj_name).ToHandleChecked(); CHECK(obj->IsJSObject()); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); } static void VerifyStringAllocation(Isolate* isolate, const char* string) { HandleScope scope(isolate); Handle s = isolate->factory()->NewStringFromUtf8( CStrVector(string)).ToHandleChecked(); CHECK_EQ(StrLength(string), s->length()); for (int index = 0; index < s->length(); index++) { CHECK_EQ(static_cast(string[index]), s->Get(index)); } } TEST(String) { CcTest::InitializeVM(); Isolate* isolate = reinterpret_cast(CcTest::isolate()); VerifyStringAllocation(isolate, "a"); VerifyStringAllocation(isolate, "ab"); VerifyStringAllocation(isolate, "abc"); VerifyStringAllocation(isolate, "abcd"); VerifyStringAllocation(isolate, "fiskerdrengen er paa havet"); } TEST(LocalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* name = "Kasper the spunky"; Handle string = factory->NewStringFromAsciiChecked(name); CHECK_EQ(StrLength(name), string->length()); } TEST(GlobalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); Handle h1; Handle h2; Handle h3; Handle h4; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); h3 = global_handles->Create(*i); h4 = global_handles->Create(*u); } // after gc, it should survive heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK((*h3)->IsString()); CHECK((*h4)->IsHeapNumber()); CHECK_EQ(*h3, *h1); 
GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h3.location()); CHECK_EQ(*h4, *h2); GlobalHandles::Destroy(h2.location()); GlobalHandles::Destroy(h4.location()); } static bool WeakPointerCleared = false; static void TestWeakGlobalHandleCallback( const v8::WeakCallbackInfo& data) { std::pair*, int>* p = reinterpret_cast*, int>*>( data.GetParameter()); if (p->second == 1234) WeakPointerCleared = true; p->first->Reset(); } TEST(WeakGlobalHandlesScavenge) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scavenge treats weak pointers as normal roots. heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK(!WeakPointerCleared); CHECK(!global_handles->IsNearDeath(h2.location())); CHECK(!global_handles->IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h2.location()); } TEST(WeakGlobalHandlesMark) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } // Make sure the objects are promoted. 
heap->CollectGarbage(OLD_SPACE); heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2)); std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); CHECK(!GlobalHandles::IsNearDeath(h1.location())); CHECK(!GlobalHandles::IsNearDeath(h2.location())); // Incremental marking potentially marked handles before they turned weak. heap->CollectAllGarbage(); CHECK((*h1)->IsString()); CHECK(WeakPointerCleared); CHECK(!GlobalHandles::IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); } TEST(DeleteWeakGlobalHandle) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); h = global_handles->Create(*i); } std::pair*, int> handle_and_id(&h, 1234); GlobalHandles::MakeWeak(h.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scanvenge does not recognize weak reference. heap->CollectGarbage(NEW_SPACE); CHECK(!WeakPointerCleared); // Mark-compact treats weak reference properly. 
heap->CollectGarbage(OLD_SPACE); CHECK(WeakPointerCleared); } TEST(DoNotPromoteWhiteObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle white = factory->NewStringFromStaticChars("white"); CHECK(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*white)))); heap->CollectGarbage(NEW_SPACE); CHECK(heap->InNewSpace(*white)); } TEST(PromoteGreyOrBlackObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle marked = factory->NewStringFromStaticChars("marked"); IncrementalMarking* marking = heap->incremental_marking(); marking->Stop(); heap->StartIncrementalMarking(); while (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*marked)))) { marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD, IncrementalMarking::FORCE_MARKING, IncrementalMarking::DO_NOT_FORCE_COMPLETION); } heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*marked)); } TEST(BytecodeArray) { static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a}; static const int kRawBytesSize = sizeof(kRawBytes); static const int kFrameSize = 32; static const int kParameterCount = 2; i::FLAG_manual_evacuation_candidates_selection = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); heap::SimulateFullSpace(heap->old_space()); Handle constant_pool = factory->NewFixedArray(5, TENURED); for (int i = 0; i < 5; i++) { Handle number = factory->NewHeapNumber(i); constant_pool->set(i, *number); } // Allocate and initialize BytecodeArray Handle array = factory->NewBytecodeArray( kRawBytesSize, kRawBytes, kFrameSize, kParameterCount, constant_pool); CHECK(array->IsBytecodeArray()); CHECK_EQ(array->length(), 
(int)sizeof(kRawBytes)); CHECK_EQ(array->frame_size(), kFrameSize); CHECK_EQ(array->parameter_count(), kParameterCount); CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_LE(array->address(), array->GetFirstBytecodeAddress()); CHECK_GE(array->address() + array->BytecodeArraySize(), array->GetFirstBytecodeAddress() + array->length()); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); CHECK_EQ(array->get(i), kRawBytes[i]); } FixedArray* old_constant_pool_address = *constant_pool; // Perform a full garbage collection and force the constant pool to be on an // evacuation candidate. Page* evac_page = Page::FromAddress(constant_pool->address()); evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); heap->CollectAllGarbage(); // BytecodeArray should survive. CHECK_EQ(array->length(), kRawBytesSize); CHECK_EQ(array->frame_size(), kFrameSize); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->get(i), kRawBytes[i]); CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); } // Constant pool should have been migrated. 
CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_NE(array->constant_pool(), old_constant_pool_address); } static const char* not_so_random_string_table[] = { "abstract", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "var", "void", "volatile", "while", "with", 0 }; static void CheckInternalizedStrings(const char** strings) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); for (const char* string = *strings; *strings != 0; string = *strings++) { HandleScope scope(isolate); Handle a = isolate->factory()->InternalizeUtf8String(CStrVector(string)); // InternalizeUtf8String may return a failure if a GC is needed. 
CHECK(a->IsInternalizedString()); Handle b = factory->InternalizeUtf8String(string); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); b = isolate->factory()->InternalizeUtf8String(CStrVector(string)); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); } } TEST(StringTable) { CcTest::InitializeVM(); v8::HandleScope sc(CcTest::isolate()); CheckInternalizedStrings(not_so_random_string_table); CheckInternalizedStrings(not_so_random_string_table); } TEST(FunctionAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check that we can add properties to function objects. 
JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check();
  CHECK_EQ(Smi::FromInt(24),
           *Object::GetProperty(function, prop_name).ToHandleChecked());
}


// Exercises add/delete/has for named properties, including the equivalence
// of plain strings and internalized strings as property keys.
TEST(ObjectProperties) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();

  v8::HandleScope sc(CcTest::isolate());
  Handle<String> object_string(String::cast(CcTest::heap()->Object_string()));
  Handle<Object> object =
      Object::GetProperty(CcTest::i_isolate()->global_object(), object_string)
          .ToHandleChecked();
  Handle<JSFunction> constructor = Handle<JSFunction>::cast(object);
  Handle<JSObject> obj = factory->NewJSObject(constructor);
  Handle<String> first = factory->InternalizeUtf8String("first");
  Handle<String> second = factory->InternalizeUtf8String("second");

  Handle<Smi> one(Smi::FromInt(1), isolate);
  Handle<Smi> two(Smi::FromInt(2), isolate);

  // check for empty
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));

  // add first
  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));

  // delete first
  CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));

  // add first and then second
  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
  JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));

  // delete first and then second
  CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
  CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));

  // add first and then second
  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
  JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));

  // delete second and then first
  CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
  CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
  CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));

  // check string and internalized string match
  const char* string1 = "fisk";
  Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
  JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check();
  Handle<String> s1_string = factory->InternalizeUtf8String(string1);
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));

  // check internalized string and string match
  const char* string2 = "fugl";
  Handle<String> s2_string = factory->InternalizeUtf8String(string2);
  JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check();
  Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
  CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
}


// Adding a property must transition the object away from its initial map.
TEST(JSObjectMaps) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();

  v8::HandleScope sc(CcTest::isolate());
  Handle<String> name = factory->InternalizeUtf8String("theFunction");
  Handle<JSFunction> function = factory->NewFunction(name);

  Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
  Handle<JSObject> obj = factory->NewJSObject(function);
  Handle<Map> initial_map(function->initial_map());

  // Set a propery
  Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
  JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
  CHECK_EQ(Smi::FromInt(23),
           *Object::GetProperty(obj, prop_name).ToHandleChecked());

  // Check the map has changed
  CHECK(*initial_map != obj->map());
}


// Exercises JSArray length handling, including the transition to dictionary
// (slow) elements once the length exceeds the Smi range.
TEST(JSArray) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();

  v8::HandleScope sc(CcTest::isolate());
  Handle<String> name = factory->InternalizeUtf8String("Array");
  Handle<Object> fun_obj =
Object::GetProperty( CcTest::i_isolate()->global_object(), name).ToHandleChecked(); Handle function = Handle::cast(fun_obj); // Allocate the object. Handle element; Handle object = factory->NewJSObject(function); Handle array = Handle::cast(object); // We just initialized the VM, no heap allocation failure yet. JSArray::Initialize(array, 0); // Set array length to 0. JSArray::SetLength(array, 0); CHECK_EQ(Smi::FromInt(0), array->length()); // Must be in fast mode. CHECK(array->HasFastSmiOrObjectElements()); // array[length] = name. JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(1), array->length()); element = i::Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); // Set array length with larger than smi value. JSArray::SetLength(array, static_cast(Smi::kMaxValue) + 1); uint32_t int_length = 0; CHECK(array->length()->ToArrayIndex(&int_length)); CHECK_EQ(static_cast(Smi::kMaxValue) + 1, int_length); CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. 
JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check(); uint32_t new_int_length = 0; CHECK(array->length()->ToArrayIndex(&new_int_length)); CHECK_EQ(static_cast(int_length), new_int_length - 1); element = Object::GetElement(isolate, array, int_length).ToHandleChecked(); CHECK_EQ(*element, *name); element = Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); } TEST(JSObjectCopy) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check(); // Make the clone. 
Handle value1, value2; Handle clone = factory->CopyJSObject(obj); CHECK(!clone.is_identical_to(obj)); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); // Flip the values. JSReceiver::SetProperty(clone, first, two, SLOPPY).Check(); JSReceiver::SetProperty(clone, second, one, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check(); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); } TEST(StringAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 }; for (int length = 0; length < 100; length++) { v8::HandleScope scope(CcTest::isolate()); char* non_one_byte = NewArray(3 * length + 1); char* one_byte = NewArray(length + 1); non_one_byte[3 * length] = 0; 
one_byte[length] = 0; for (int i = 0; i < length; i++) { one_byte[i] = 'a'; non_one_byte[3 * i] = chars[0]; non_one_byte[3 * i + 1] = chars[1]; non_one_byte[3 * i + 2] = chars[2]; } Handle non_one_byte_sym = factory->InternalizeUtf8String( Vector(non_one_byte, 3 * length)); CHECK_EQ(length, non_one_byte_sym->length()); Handle one_byte_sym = factory->InternalizeOneByteString(OneByteVector(one_byte, length)); CHECK_EQ(length, one_byte_sym->length()); Handle non_one_byte_str = factory->NewStringFromUtf8(Vector(non_one_byte, 3 * length)) .ToHandleChecked(); non_one_byte_str->Hash(); CHECK_EQ(length, non_one_byte_str->length()); Handle one_byte_str = factory->NewStringFromUtf8(Vector(one_byte, length)) .ToHandleChecked(); one_byte_str->Hash(); CHECK_EQ(length, one_byte_str->length()); DeleteArray(non_one_byte); DeleteArray(one_byte); } } static int ObjectsFoundInHeap(Heap* heap, Handle objs[], int size) { // Count the number of objects found in the heap. int found_count = 0; HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { for (int i = 0; i < size; i++) { if (*objs[i] == obj) { found_count++; } } } return found_count; } TEST(Iteration) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); // Array of objects to scan haep for. const int objs_count = 6; Handle objs[objs_count]; int next_objs_index = 0; // Allocate a JS array to OLD_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewJSArray(10); objs[next_objs_index++] = factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, TENURED); // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij"); objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij", TENURED); // Allocate a large string (for large object space). 
int large_size = Page::kMaxRegularHeapObjectSize + 1; char* str = new char[large_size]; for (int i = 0; i < large_size - 1; ++i) str[i] = 'a'; str[large_size - 1] = '\0'; objs[next_objs_index++] = factory->NewStringFromAsciiChecked(str, TENURED); delete[] str; // Add a Map object to look for. objs[next_objs_index++] = Handle(HeapObject::cast(*objs[0])->map()); CHECK_EQ(objs_count, next_objs_index); CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count)); } UNINITIALIZED_TEST(TestCodeFlushing) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); i::Isolate* i_isolate = reinterpret_cast(isolate); isolate->Enter(); Factory* factory = i_isolate->factory(); { v8::HandleScope scope(isolate); v8::Context::New(isolate)->Enter(); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(isolate); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(i_isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. i_isolate->heap()->CollectAllGarbage(); i_isolate->heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. 
    // Enough full GCs to age the code past the flushing threshold.
    const int kAgingThreshold = 6;
    for (int i = 0; i < kAgingThreshold; i++) {
      i_isolate->heap()->CollectAllGarbage();
    }

    // foo should no longer be in the compilation cache
    CHECK(!function->shared()->is_compiled() || function->IsOptimized());
    CHECK(!function->is_compiled() || function->IsOptimized());
    // Call foo to get it recompiled.
    CompileRun("foo()");
    CHECK(function->shared()->is_compiled());
    CHECK(function->is_compiled());
  }
  isolate->Exit();
  isolate->Dispose();
}


// Like TestCodeFlushing, but with --optimize-for-size: code that has only
// been executed once is pre-aged and flushed after a single additional GC.
TEST(TestCodeFlushingPreAged) {
  // If we do not flush code this test is invalid.
  if (!FLAG_flush_code) return;
  i::FLAG_allow_natives_syntax = true;
  i::FLAG_optimize_for_size = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  v8::HandleScope scope(CcTest::isolate());
  const char* source =
      "function foo() {"
      " var x = 42;"
      " var y = 42;"
      " var z = x + y;"
      "};"
      "foo()";
  Handle foo_name = factory->InternalizeUtf8String("foo");

  // Compile foo, but don't run it.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun(source);
  }

  // Check function is compiled.
  Handle func_value =
      Object::GetProperty(isolate->global_object(), foo_name)
          .ToHandleChecked();
  CHECK(func_value->IsJSFunction());
  Handle function = Handle::cast(func_value);
  CHECK(function->shared()->is_compiled());

  // The code has been run so will survive at least one GC.
  CcTest::heap()->CollectAllGarbage();
  CHECK(function->shared()->is_compiled());

  // The code was only run once, so it should be pre-aged and collected on the
  // next GC.
  CcTest::heap()->CollectAllGarbage();
  CHECK(!function->shared()->is_compiled() || function->IsOptimized());

  // Execute the function again twice, and ensure it is reset to the young age.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("foo();"
               "foo();");
  }

  // The code will survive at least two GC now that it is young again.
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CHECK(function->shared()->is_compiled());

  // Simulate several GCs that use full marking.
  const int kAgingThreshold = 6;
  for (int i = 0; i < kAgingThreshold; i++) {
    CcTest::heap()->CollectAllGarbage();
  }

  // foo should no longer be in the compilation cache
  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
  CHECK(!function->is_compiled() || function->IsOptimized());
  // Call foo to get it recompiled.
  CompileRun("foo()");
  CHECK(function->shared()->is_compiled());
  CHECK(function->is_compiled());
}


// Checks that code flushing also happens when aging GCs are preceded by
// simulated incremental marking steps.
TEST(TestCodeFlushingIncremental) {
  // If we do not flush code this test is invalid.
  if (!FLAG_flush_code) return;
  i::FLAG_allow_natives_syntax = true;
  i::FLAG_optimize_for_size = false;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  v8::HandleScope scope(CcTest::isolate());
  const char* source =
      "function foo() {"
      " var x = 42;"
      " var y = 42;"
      " var z = x + y;"
      "};"
      "foo()";
  Handle foo_name = factory->InternalizeUtf8String("foo");

  // This compile will add the code to the compilation cache.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun(source);
  }

  // Check function is compiled.
  Handle func_value =
      Object::GetProperty(isolate->global_object(), foo_name)
          .ToHandleChecked();
  CHECK(func_value->IsJSFunction());
  Handle function = Handle::cast(func_value);
  CHECK(function->shared()->is_compiled());

  // The code will survive at least two GCs.
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CHECK(function->shared()->is_compiled());

  // Simulate several GCs that use incremental marking.
  const int kAgingThreshold = 6;
  for (int i = 0; i < kAgingThreshold; i++) {
    heap::SimulateIncrementalMarking(CcTest::heap());
    CcTest::heap()->CollectAllGarbage();
  }
  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
  CHECK(!function->is_compiled() || function->IsOptimized());

  // This compile will compile the function again.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("foo();");
  }

  // Simulate several GCs that use incremental marking but make sure
  // the loop breaks once the function is enqueued as a candidate.
  for (int i = 0; i < kAgingThreshold; i++) {
    heap::SimulateIncrementalMarking(CcTest::heap());
    // A non-undefined next_function_link means foo is on the candidate list.
    if (!function->next_function_link()->IsUndefined(CcTest::i_isolate()))
      break;
    CcTest::heap()->CollectAllGarbage();
  }

  // Force optimization while incremental marking is active and while
  // the function is enqueued as a candidate.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
  }

  // Simulate one final GC to make sure the candidate queue is sane.
  CcTest::heap()->CollectAllGarbage();
  CHECK(function->shared()->is_compiled() || !function->IsOptimized());
  CHECK(function->is_compiled() || !function->IsOptimized());
}


// Checks that a scavenge performed while incremental marking is running does
// not corrupt the code-flushing candidate queue, even when one candidate
// function dies during the cycle.
TEST(TestCodeFlushingIncrementalScavenge) {
  // If we do not flush code this test is invalid.
  if (!FLAG_flush_code) return;
  i::FLAG_allow_natives_syntax = true;
  i::FLAG_optimize_for_size = false;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  v8::HandleScope scope(CcTest::isolate());
  const char* source =
      "var foo = function() {"
      " var x = 42;"
      " var y = 42;"
      " var z = x + y;"
      "};"
      "foo();"
      "var bar = function() {"
      " var x = 23;"
      "};"
      "bar();";
  Handle foo_name = factory->InternalizeUtf8String("foo");
  Handle bar_name = factory->InternalizeUtf8String("bar");

  // Perform one initial GC to enable code flushing.
  CcTest::heap()->CollectAllGarbage();

  // This compile will add the code to the compilation cache.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun(source);
  }

  // Check functions are compiled.
  Handle func_value =
      Object::GetProperty(isolate->global_object(), foo_name)
          .ToHandleChecked();
  CHECK(func_value->IsJSFunction());
  Handle function = Handle::cast(func_value);
  CHECK(function->shared()->is_compiled());
  Handle func_value2 =
      Object::GetProperty(isolate->global_object(), bar_name)
          .ToHandleChecked();
  CHECK(func_value2->IsJSFunction());
  Handle function2 = Handle::cast(func_value2);
  CHECK(function2->shared()->is_compiled());

  // Clear references to functions so that one of them can die.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("foo = 0; bar = 0;");
  }

  // Bump the code age so that flushing is triggered while the function
  // object is still located in new-space.
  const int kAgingThreshold = 6;
  for (int i = 0; i < kAgingThreshold; i++) {
    // Alternate marking parity; code age only advances on parity change.
    function->shared()->code()->MakeOlder(static_cast(i % 2));
    function2->shared()->code()->MakeOlder(static_cast(i % 2));
  }

  // Simulate incremental marking so that the functions are enqueued as
  // code flushing candidates. Then kill one of the functions. Finally
  // perform a scavenge while incremental marking is still running.
  heap::SimulateIncrementalMarking(CcTest::heap());
  *function2.location() = NULL;
  CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking");

  // Simulate one final GC to make sure the candidate queue is sane.
  CcTest::heap()->CollectAllGarbage();
  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
  CHECK(!function->is_compiled() || function->IsOptimized());
}


// Checks that aborting incremental marking (here: by enabling the debugger
// and setting a breakpoint mid-cycle) disables code flushing for that cycle.
TEST(TestCodeFlushingIncrementalAbort) {
  // If we do not flush code this test is invalid.
  if (!FLAG_flush_code) return;
  i::FLAG_allow_natives_syntax = true;
  i::FLAG_optimize_for_size = false;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  v8::HandleScope scope(CcTest::isolate());
  const char* source =
      "function foo() {"
      " var x = 42;"
      " var y = 42;"
      " var z = x + y;"
      "};"
      "foo()";
  Handle foo_name = factory->InternalizeUtf8String("foo");

  // This compile will add the code to the compilation cache.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun(source);
  }

  // Check function is compiled.
  Handle func_value =
      Object::GetProperty(isolate->global_object(), foo_name)
          .ToHandleChecked();
  CHECK(func_value->IsJSFunction());
  Handle function = Handle::cast(func_value);
  CHECK(function->shared()->is_compiled());

  // The code will survive at least two GCs.
  heap->CollectAllGarbage();
  heap->CollectAllGarbage();
  CHECK(function->shared()->is_compiled());

  // Bump the code age so that flushing is triggered.
  const int kAgingThreshold = 6;
  for (int i = 0; i < kAgingThreshold; i++) {
    // Alternate marking parity; code age only advances on parity change.
    function->shared()->code()->MakeOlder(static_cast(i % 2));
  }

  // Simulate incremental marking so that the function is enqueued as
  // code flushing candidate.
  heap::SimulateIncrementalMarking(heap);

  // Enable the debugger and add a breakpoint while incremental marking
  // is running so that incremental marking aborts and code flushing is
  // disabled.
  int position = 0;
  Handle breakpoint_object(Smi::FromInt(0), isolate);
  EnableDebugger(CcTest::isolate());
  isolate->debug()->SetBreakPoint(function, breakpoint_object, &position);
  isolate->debug()->ClearAllBreakPoints();
  DisableDebugger(CcTest::isolate());

  // Force optimization now that code flushing is disabled.
  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
  }

  // Simulate one final GC to make sure the candidate queue is sane.
  heap->CollectAllGarbage();
  CHECK(function->shared()->is_compiled() || !function->IsOptimized());
  CHECK(function->is_compiled() || !function->IsOptimized());
}


// Checks that installing an already-optimized code object into a closure via
// the CompileLazy built-in works while incremental marking is running (i.e.
// exercises the incremental write barrier on that path).
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
  // Turn off always_opt because it interferes with running the built-in for
  // the last call to g().
  i::FLAG_always_opt = false;
  i::FLAG_allow_natives_syntax = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  v8::HandleScope scope(CcTest::isolate());

  CompileRun(
      "function make_closure(x) {"
      " return function() { return x + 3 };"
      "}"
      "var f = make_closure(5); f();"
      "var g = make_closure(5);");

  // Check f is compiled.
  Handle f_name = factory->InternalizeUtf8String("f");
  Handle f_value =
      Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked();
  Handle f_function = Handle::cast(f_value);
  CHECK(f_function->is_compiled());

  // Check g is not compiled.
  Handle g_name = factory->InternalizeUtf8String("g");
  Handle g_value =
      Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
  Handle g_function = Handle::cast(g_value);
  CHECK(!g_function->is_compiled());

  heap::SimulateIncrementalMarking(heap);
  CompileRun("%OptimizeFunctionOnNextCall(f); f();");

  // g should now have available an optimized function, unmarked by gc. The
  // CompileLazy built-in will discover it and install it in the closure, and
  // the incremental write barrier should be used.
  CompileRun("g();");
  CHECK(g_function->is_compiled());
}


// Checks compilation-cache retention: a cached script survives GCs while its
// code is young, and is evicted once the code has aged.
TEST(CompilationCacheCachingBehavior) {
  // If we do not flush code, or have the compilation cache turned off, this
  // test is invalid.
  if (!FLAG_flush_code || !FLAG_compilation_cache) {
    return;
  }
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  CompilationCache* compilation_cache = isolate->compilation_cache();
  LanguageMode language_mode = construct_language_mode(FLAG_use_strict);

  v8::HandleScope scope(CcTest::isolate());
  const char* raw_source =
      "function foo() {"
      " var x = 42;"
      " var y = 42;"
      " var z = x + y;"
      "};"
      "foo()";
  Handle source = factory->InternalizeUtf8String(raw_source);
  Handle native_context = isolate->native_context();

  {
    v8::HandleScope scope(CcTest::isolate());
    CompileRun(raw_source);
  }

  // The script should be in the cache now.
  MaybeHandle info = compilation_cache->LookupScript(
      source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false),
      native_context, language_mode);
  CHECK(!info.is_null());

  // Check that the code cache entry survives at least one GC.
  // (Unless --optimize-for-size, in which case it might get collected
  // immediately.)
  if (!FLAG_optimize_for_size) {
    heap->CollectAllGarbage();
    info = compilation_cache->LookupScript(
        source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false),
        native_context, language_mode);
    CHECK(!info.is_null());
  }

  // Progress code age until it's old and ready for GC.
  while (!info.ToHandleChecked()->code()->IsOld()) {
    // To guarantee progress, we have to MakeOlder with different parities.
    // We can't just use NO_MARKING_PARITY, since e.g. kExecutedOnceCodeAge is
    // always NO_MARKING_PARITY and the code age only progresses if the parity
    // is different.
    info.ToHandleChecked()->code()->MakeOlder(ODD_MARKING_PARITY);
    info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY);
  }

  heap->CollectAllGarbage();
  // Ensure code aging cleared the entry from the cache.
  info = compilation_cache->LookupScript(
      source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false),
      native_context, language_mode);
  CHECK(info.is_null());
}


// Compiles a trivial function named |name|, runs it twice, then forces
// optimization via %OptimizeFunctionOnNextCall and runs it once more.
static void OptimizeEmptyFunction(const char* name) {
  HandleScope scope(CcTest::i_isolate());
  EmbeddedVector source;
  SNPrintF(source,
           "function %s() { return 0; }"
           "%s(); %s();"
           "%%OptimizeFunctionOnNextCall(%s);"
           "%s();",
           name, name, name, name, name);
  CompileRun(source.start());
}


// Count the number of native contexts in the weak list of native contexts.
int CountNativeContexts() {
  int count = 0;
  Object* object = CcTest::heap()->native_contexts_list();
  while (!object->IsUndefined(CcTest::i_isolate())) {
    count++;
    object = Context::cast(object)->next_context_link();
  }
  return count;
}


// Count the number of user functions in the weak list of optimized
// functions attached to a native context.
static int CountOptimizedUserFunctions(v8::Local context) {
  int count = 0;
  Handle icontext = v8::Utils::OpenHandle(*context);
  Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST);
  while (object->IsJSFunction() &&
         !JSFunction::cast(object)->shared()->IsBuiltin()) {
    count++;
    object = JSFunction::cast(object)->next_function_link();
  }
  return count;
}


// Checks the bookkeeping of the weak lists of native contexts and of
// optimized functions: scavenges keep entries alive, mark-compact drops
// dead ones.
TEST(TestInternalWeakLists) {
  FLAG_always_opt = false;
  FLAG_allow_natives_syntax = true;
  v8::V8::Initialize();

  // Some flags turn Scavenge collections into Mark-sweep collections
  // and hence are incompatible with this test case.
  if (FLAG_gc_global || FLAG_stress_compaction) return;
  FLAG_retain_maps_for_n_gc = 0;

  static const int kNumTestContexts = 10;

  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  HandleScope scope(isolate);
  v8::Local ctx[kNumTestContexts];
  if (!isolate->use_crankshaft()) return;

  CHECK_EQ(0, CountNativeContexts());

  // Create a number of global contexts which gets linked together.
  for (int i = 0; i < kNumTestContexts; i++) {
    ctx[i] = v8::Context::New(CcTest::isolate());

    // Collect garbage that might have been created by one of the
    // installed extensions.
    isolate->compilation_cache()->Clear();
    heap->CollectAllGarbage();

    CHECK_EQ(i + 1, CountNativeContexts());

    ctx[i]->Enter();

    // Create a handle scope so no function objects get stuck in the outer
    // handle scope.
    HandleScope scope(isolate);
    CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i]));
    OptimizeEmptyFunction("f1");
    CHECK_EQ(1, CountOptimizedUserFunctions(ctx[i]));
    OptimizeEmptyFunction("f2");
    CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
    OptimizeEmptyFunction("f3");
    CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
    OptimizeEmptyFunction("f4");
    CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
    OptimizeEmptyFunction("f5");
    CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i]));

    // Remove function f1.
    CompileRun("f1=null");

    // Scavenge treats these references as strong.
    for (int j = 0; j < 10; j++) {
      CcTest::heap()->CollectGarbage(NEW_SPACE);
      CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i]));
    }

    // Mark compact handles the weak references.
    isolate->compilation_cache()->Clear();
    heap->CollectAllGarbage();
    CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));

    // Get rid of f3 and f5 in the same way.
    CompileRun("f3=null");
    for (int j = 0; j < 10; j++) {
      CcTest::heap()->CollectGarbage(NEW_SPACE);
      CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
    }
    CcTest::heap()->CollectAllGarbage();
    CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
    CompileRun("f5=null");
    for (int j = 0; j < 10; j++) {
      CcTest::heap()->CollectGarbage(NEW_SPACE);
      CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
    }
    CcTest::heap()->CollectAllGarbage();
    CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));

    ctx[i]->Exit();
  }

  // Force compilation cache cleanup.
  CcTest::heap()->NotifyContextDisposed(true);
  CcTest::heap()->CollectAllGarbage();

  // Dispose the native contexts one by one.
  for (int i = 0; i < kNumTestContexts; i++) {
    // TODO(dcarney): is there a better way to do this?
    // Overwrite the handle slot directly so the context becomes unreachable.
    i::Object** unsafe = reinterpret_cast(*ctx[i]);
    *unsafe = CcTest::heap()->undefined_value();
    ctx[i].Clear();

    // Scavenge treats these references as strong.
    for (int j = 0; j < 10; j++) {
      CcTest::heap()->CollectGarbage(i::NEW_SPACE);
      CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
    }

    // Mark compact handles the weak references.
    CcTest::heap()->CollectAllGarbage();
    CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
  }

  CHECK_EQ(0, CountNativeContexts());
}


// Count the number of native contexts in the weak list of native contexts,
// causing a GC after the specified number of elements.
static int CountNativeContextsWithGC(Isolate* isolate, int n) {
  Heap* heap = isolate->heap();
  int count = 0;
  Handle object(heap->native_contexts_list(), isolate);
  while (!object->IsUndefined(isolate)) {
    count++;
    // Trigger a full GC mid-traversal to check the list stays walkable.
    if (count == n) heap->CollectAllGarbage();
    object =
        Handle(Context::cast(*object)->next_context_link(), isolate);
  }
  return count;
}


// Count the number of user functions in the weak list of optimized
// functions attached to a native context causing a GC after the
// specified number of elements.
static int CountOptimizedUserFunctionsWithGC(v8::Local context, int n) {
  int count = 0;
  Handle icontext = v8::Utils::OpenHandle(*context);
  Isolate* isolate = icontext->GetIsolate();
  Handle object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST),
                isolate);
  while (object->IsJSFunction() &&
         !Handle::cast(object)->shared()->IsBuiltin()) {
    count++;
    // Trigger a full GC mid-traversal to check the list stays walkable.
    if (count == n) isolate->heap()->CollectAllGarbage();
    object = Handle(
        Object::cast(JSFunction::cast(*object)->next_function_link()),
        isolate);
  }
  return count;
}


// Checks that the weak lists of native contexts and of optimized functions
// can be traversed safely while GCs are triggered in the middle of the
// traversal.
TEST(TestInternalWeakListsTraverseWithGC) {
  FLAG_always_opt = false;
  FLAG_allow_natives_syntax = true;
  v8::V8::Initialize();

  static const int kNumTestContexts = 10;

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  v8::Local ctx[kNumTestContexts];
  if (!isolate->use_crankshaft()) return;

  CHECK_EQ(0, CountNativeContexts());

  // Create a number of contexts and check the length of the weak list both
  // with and without GCs while iterating the list.
  for (int i = 0; i < kNumTestContexts; i++) {
    ctx[i] = v8::Context::New(CcTest::isolate());
    CHECK_EQ(i + 1, CountNativeContexts());
    CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
  }

  ctx[0]->Enter();

  // Compile a number of functions and check the length of the weak list of
  // optimized functions, both with and without GCs while iterating the list.
  CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0]));
  OptimizeEmptyFunction("f1");
  CHECK_EQ(1, CountOptimizedUserFunctions(ctx[0]));
  CHECK_EQ(1, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
  OptimizeEmptyFunction("f2");
  CHECK_EQ(2, CountOptimizedUserFunctions(ctx[0]));
  CHECK_EQ(2, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
  OptimizeEmptyFunction("f3");
  CHECK_EQ(3, CountOptimizedUserFunctions(ctx[0]));
  CHECK_EQ(3, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
  OptimizeEmptyFunction("f4");
  CHECK_EQ(4, CountOptimizedUserFunctions(ctx[0]));
  CHECK_EQ(4, CountOptimizedUserFunctionsWithGC(ctx[0], 2));
  OptimizeEmptyFunction("f5");
  CHECK_EQ(5, CountOptimizedUserFunctions(ctx[0]));
  CHECK_EQ(5, CountOptimizedUserFunctionsWithGC(ctx[0], 4));

  ctx[0]->Exit();
}


// Compares heap growth from compiling a regexp above the optimization size
// threshold against one just below it.
TEST(TestSizeOfRegExpCode) {
  if (!FLAG_regexp_optimization) return;

  v8::V8::Initialize();

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  LocalContext context;

  // Adjust source below and this check to match
  // RegExpImpl::kRegExpTooLargeToOptimize.
  CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB);

  // Compile a regexp that is much larger if we are using regexp optimizations.
  CompileRun(
      "var reg_exp_source = '(?:a|bc|def|ghij|klmno|pqrstu)';"
      "var half_size_reg_exp;"
      "while (reg_exp_source.length < 20 * 1024) {"
      " half_size_reg_exp = reg_exp_source;"
      " reg_exp_source = reg_exp_source + reg_exp_source;"
      "}"
      // Flatten string.
      "reg_exp_source.match(/f/);");

  // Get initial heap size after several full GCs, which will stabilize
  // the heap size and return with sweeping finished completely.
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  int initial_size = static_cast(CcTest::heap()->SizeOfObjects());

  // Compile the large (optimizable) regexp and measure heap growth.
  CompileRun("'foo'.match(reg_exp_source);");
  CcTest::heap()->CollectAllGarbage();
  int size_with_regexp = static_cast(CcTest::heap()->SizeOfObjects());

  // Compile the half-size regexp (below the optimization threshold).
  CompileRun("'foo'.match(half_size_reg_exp);");
  CcTest::heap()->CollectAllGarbage();
  int size_with_optimized_regexp =
      static_cast(CcTest::heap()->SizeOfObjects());

  int size_of_regexp_code = size_with_regexp - initial_size;

  // On some platforms the debug-code flag causes huge amounts of regexp code
  // to be emitted, breaking this test.
  if (!FLAG_debug_code) {
    CHECK_LE(size_of_regexp_code, 1 * MB);
  }

  // Small regexp is half the size, but compiles to more than twice the code
  // due to the optimization steps.
  CHECK_GE(size_with_optimized_regexp,
           size_with_regexp + size_of_regexp_code * 2);
}


// Checks that Heap::SizeOfObjects tracks allocations exactly, and returns to
// the initial value after a full GC even while sweeping may still be running.
HEAP_TEST(TestSizeOfObjects) {
  v8::V8::Initialize();

  // Get initial heap size after several full GCs, which will stabilize
  // the heap size and return with sweeping finished completely.
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  int initial_size = static_cast(CcTest::heap()->SizeOfObjects());

  {
    // Allocate objects on several different old-space pages so that
    // concurrent sweeper threads will be busy sweeping the old space on
    // subsequent GC runs.
    AlwaysAllocateScope always_allocate(CcTest::i_isolate());
    int filler_size = static_cast(FixedArray::SizeFor(8192));
    for (int i = 1; i <= 100; i++) {
      CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked();
      // SizeOfObjects must reflect each allocation exactly.
      CHECK_EQ(initial_size + i * filler_size,
               static_cast(CcTest::heap()->SizeOfObjects()));
    }
  }

  // The heap size should go back to initial size after a full GC, even
  // though sweeping didn't finish yet.
  CcTest::heap()->CollectAllGarbage();

  // Normally sweeping would not be complete here, but no guarantees.
  CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects()));

  // Waiting for sweeper threads should not change heap size.
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects()));
}


// Exercises Heap::GetMaximumFillToAlign and Heap::GetFillToAlign for every
// supported allocation alignment, using a NULL base address.
TEST(TestAlignmentCalculations) {
  // Maximum fill amounts are consistent.
  int maximum_double_misalignment = kDoubleSize - kPointerSize;
  int maximum_simd128_misalignment = kSimd128Size - kPointerSize;
  int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
  CHECK_EQ(0, max_word_fill);
  int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
  CHECK_EQ(maximum_double_misalignment, max_double_fill);
  int max_double_unaligned_fill =
      Heap::GetMaximumFillToAlign(kDoubleUnaligned);
  CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill);
  int max_simd128_unaligned_fill =
      Heap::GetMaximumFillToAlign(kSimd128Unaligned);
  CHECK_EQ(maximum_simd128_misalignment, max_simd128_unaligned_fill);

  Address base = static_cast(NULL);
  int fill = 0;

  // Word alignment never requires fill.
  fill = Heap::GetFillToAlign(base, kWordAligned);
  CHECK_EQ(0, fill);
  fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned);
  CHECK_EQ(0, fill);

  // No fill is required when address is double aligned.
  fill = Heap::GetFillToAlign(base, kDoubleAligned);
  CHECK_EQ(0, fill);
  // Fill is required if address is not double aligned.
  fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned);
  CHECK_EQ(maximum_double_misalignment, fill);
  // kDoubleUnaligned has the opposite fill amounts.
  fill = Heap::GetFillToAlign(base, kDoubleUnaligned);
  CHECK_EQ(maximum_double_misalignment, fill);
  fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned);
  CHECK_EQ(0, fill);

  // 128 bit SIMD types have 2 or 4 possible alignments, depending on platform.
  fill = Heap::GetFillToAlign(base, kSimd128Unaligned);
  CHECK_EQ((3 * kPointerSize) & kSimd128AlignmentMask, fill);
  fill = Heap::GetFillToAlign(base + kPointerSize, kSimd128Unaligned);
  CHECK_EQ((2 * kPointerSize) & kSimd128AlignmentMask, fill);
  fill = Heap::GetFillToAlign(base + 2 * kPointerSize, kSimd128Unaligned);
  CHECK_EQ(kPointerSize, fill);
  fill = Heap::GetFillToAlign(base + 3 * kPointerSize, kSimd128Unaligned);
  CHECK_EQ(0, fill);
}


// Allocates |size| bytes in new space with the given alignment, then fills
// the allocation with a filler object so the heap remains iterable.
static HeapObject* NewSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
  Heap* heap = CcTest::heap();
  AllocationResult allocation =
      heap->new_space()->AllocateRawAligned(size, alignment);
  HeapObject* obj = NULL;
  allocation.To(&obj);
  heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}


// Get new space allocation into the desired alignment.
static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
  Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
  int fill = Heap::GetFillToAlign(*top_addr, alignment);
  if (fill) {
    NewSpaceAllocateAligned(fill + offset, kWordAligned);
  }
  return *top_addr;
}


// Checks that aligned new-space allocations yield correctly aligned objects,
// preceded by filler objects of the expected size when fill was needed.
TEST(TestAlignedAllocation) {
  // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
  const intptr_t double_misalignment = kDoubleSize - kPointerSize;
  Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
  Address start;
  HeapObject* obj;
  HeapObject* filler;
  if (double_misalignment) {
    // Allocate a pointer sized object that must be double aligned at an
    // aligned address.
    start = AlignNewSpace(kDoubleAligned, 0);
    obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
    // There is no filler.
    CHECK_EQ(kPointerSize, *top_addr - start);

    // Allocate a second pointer sized object that must be double aligned at an
    // unaligned address.
    start = AlignNewSpace(kDoubleAligned, kPointerSize);
    obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
    // There is a filler object before the object.
    filler = HeapObject::FromAddress(start);
    CHECK(obj != filler && filler->IsFiller() &&
          filler->Size() == kPointerSize);
    CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);

    // Similarly for kDoubleUnaligned.
    start = AlignNewSpace(kDoubleUnaligned, 0);
    obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
    CHECK_EQ(kPointerSize, *top_addr - start);
    start = AlignNewSpace(kDoubleUnaligned, kPointerSize);
    obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
    // There is a filler object before the object.
    filler = HeapObject::FromAddress(start);
    CHECK(obj != filler && filler->IsFiller() &&
          filler->Size() == kPointerSize);
    CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);
  }

  // Now test SIMD alignment. There are 2 or 4 possible alignments, depending
  // on platform.
  start = AlignNewSpace(kSimd128Unaligned, 0);
  obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is no filler.
  CHECK_EQ(kPointerSize, *top_addr - start);
  start = AlignNewSpace(kSimd128Unaligned, kPointerSize);
  obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is a filler object before the object.
  filler = HeapObject::FromAddress(start);
  CHECK(obj != filler && filler->IsFiller() &&
        filler->Size() == kSimd128Size - kPointerSize);
  CHECK_EQ(kPointerSize + kSimd128Size - kPointerSize, *top_addr - start);

  if (double_misalignment) {
    // Test the 2 other alignments possible on 32 bit platforms.
    start = AlignNewSpace(kSimd128Unaligned, 2 * kPointerSize);
    obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
    CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
    // There is a filler object before the object.
    filler = HeapObject::FromAddress(start);
    CHECK(obj != filler && filler->IsFiller() &&
          filler->Size() == 2 * kPointerSize);
    CHECK_EQ(kPointerSize + 2 * kPointerSize, *top_addr - start);
    start = AlignNewSpace(kSimd128Unaligned, 3 * kPointerSize);
    obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
    CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
    // There is a filler object before the object.
    filler = HeapObject::FromAddress(start);
    CHECK(obj != filler && filler->IsFiller() &&
          filler->Size() == kPointerSize);
    CHECK_EQ(kPointerSize + kPointerSize, *top_addr - start);
  }
}


// Allocates |size| bytes in old space with the given alignment, then fills
// the allocation with a filler object so the heap remains iterable.
static HeapObject* OldSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
  Heap* heap = CcTest::heap();
  AllocationResult allocation =
      heap->old_space()->AllocateRawAligned(size, alignment);
  HeapObject* obj = NULL;
  allocation.To(&obj);
  heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}


// Get old space allocation into the desired alignment.
static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
  Address* top_addr = CcTest::heap()->old_space()->allocation_top_address();
  int fill = Heap::GetFillToAlign(*top_addr, alignment);
  int allocation = fill + offset;
  if (allocation) {
    OldSpaceAllocateAligned(allocation, kWordAligned);
  }
  Address top = *top_addr;
  // Now force the remaining allocation onto the free list.
  CcTest::heap()->old_space()->EmptyAllocationInfo();
  return top;
}


// Test the case where allocation must be done from the free list, so filler
// may precede or follow the object.
TEST(TestAlignedOverAllocation) {
  // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
  const intptr_t double_misalignment = kDoubleSize - kPointerSize;
  Address start;
  HeapObject* obj;
  HeapObject* filler1;
  HeapObject* filler2;
  if (double_misalignment) {
    start = AlignOldSpace(kDoubleAligned, 0);
    obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
    // The object is aligned, and a filler object is created after.
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
    filler1 = HeapObject::FromAddress(start + kPointerSize);
    CHECK(obj != filler1 && filler1->IsFiller() &&
          filler1->Size() == kPointerSize);
    // Try the opposite alignment case.
    start = AlignOldSpace(kDoubleAligned, kPointerSize);
    obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
    filler1 = HeapObject::FromAddress(start);
    CHECK(obj != filler1);
    CHECK(filler1->IsFiller());
    CHECK(filler1->Size() == kPointerSize);
    // NOTE(review): the combined CHECK below repeats the three CHECKs above.
    CHECK(obj != filler1 && filler1->IsFiller() &&
          filler1->Size() == kPointerSize);
    // Similarly for kDoubleUnaligned.
    start = AlignOldSpace(kDoubleUnaligned, 0);
    obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
    // The object is aligned, and a filler object is created after.
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
    filler1 = HeapObject::FromAddress(start + kPointerSize);
    CHECK(obj != filler1 && filler1->IsFiller() &&
          filler1->Size() == kPointerSize);
    // Try the opposite alignment case.
    start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
    obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
    CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
    filler1 = HeapObject::FromAddress(start);
    CHECK(obj != filler1 && filler1->IsFiller() &&
          filler1->Size() == kPointerSize);
  }

  // Now test SIMD alignment. There are 2 or 4 possible alignments, depending
  // on platform.
  start = AlignOldSpace(kSimd128Unaligned, 0);
  obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is a filler object after the object.
  filler1 = HeapObject::FromAddress(start + kPointerSize);
  CHECK(obj != filler1 && filler1->IsFiller() &&
        filler1->Size() == kSimd128Size - kPointerSize);
  start = AlignOldSpace(kSimd128Unaligned, kPointerSize);
  obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is a filler object before the object.
  filler1 = HeapObject::FromAddress(start);
  CHECK(obj != filler1 && filler1->IsFiller() &&
        filler1->Size() == kSimd128Size - kPointerSize);

  if (double_misalignment) {
    // Test the 2 other alignments possible on 32 bit platforms.
    start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize);
    obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
    CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
    // There are filler objects before and after the object.
filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == 2 * kPointerSize); filler2 = HeapObject::FromAddress(start + 3 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == kPointerSize); start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There are filler objects before and after the object. filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); filler2 = HeapObject::FromAddress(start + 2 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == 2 * kPointerSize); } } TEST(TestSizeOfObjectsVsHeapIteratorPrecision) { CcTest::InitializeVM(); HeapIterator iterator(CcTest::heap()); intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects(); intptr_t size_of_objects_2 = 0; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (!obj->IsFreeSpace()) { size_of_objects_2 += obj->Size(); } } // Delta must be within 5% of the larger result. // TODO(gc): Tighten this up by distinguishing between byte // arrays that are real and those that merely mark free space // on the heap. if (size_of_objects_1 > size_of_objects_2) { intptr_t delta = size_of_objects_1 - size_of_objects_2; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_1 / 20, delta); } else { intptr_t delta = size_of_objects_2 - size_of_objects_1; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_2 / 20, delta); } } static void FillUpNewSpace(NewSpace* new_space) { // Fill up new space to the point that it is completely full. 
Make sure // that the scavenger does not undo the filling. Heap* heap = new_space->heap(); Isolate* isolate = heap->isolate(); Factory* factory = isolate->factory(); HandleScope scope(isolate); AlwaysAllocateScope always_allocate(isolate); intptr_t available = new_space->Capacity() - new_space->Size(); intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1; for (intptr_t i = 0; i < number_of_fillers; i++) { CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED))); } } TEST(GrowAndShrinkNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); NewSpace* new_space = heap->new_space(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } // Explicitly growing should double the space capacity. intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); old_capacity = new_space->TotalCapacity(); FillUpNewSpace(new_space); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Explicitly shrinking should not affect space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Let the scavenger empty the new space. heap->CollectGarbage(NEW_SPACE); CHECK_LE(new_space->Size(), old_capacity); // Explicitly shrinking should halve the space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == 2 * new_capacity); // Consecutive shrinking should not affect space capacity. 
old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_space->Shrink(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } TEST(CollectingAllAvailableGarbageShrinksNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } v8::HandleScope scope(CcTest::isolate()); NewSpace* new_space = heap->new_space(); intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); FillUpNewSpace(new_space); heap->CollectAllAvailableGarbage(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } static int NumberOfGlobalObjects() { int count = 0; HeapIterator iterator(CcTest::heap()); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsJSGlobalObject()) count++; } return count; } // Test that we don't embed maps from foreign contexts into // optimized code. 
TEST(LeakNativeContextViaMap) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = {x: 42}");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o.x; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Sever the only strong reference from ctx2 to the ctx1 object so that
    // only embedded references (the leak under test) could keep it alive.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Test that we don't embed functions from foreign contexts into
// optimized code.
TEST(LeakNativeContextViaFunction) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = function() { return 42; }");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f(x) { return x(); }"
        "for (var i = 0; i < 10; ++i) f(o);"
        "%OptimizeFunctionOnNextCall(f);"
        "f(o);");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Drop the only strong reference to the ctx1 function before collecting.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Keyed-load variant: a foreign-context array must not be leaked through
// maps embedded in optimized code.
TEST(LeakNativeContextViaMapKeyed) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = [42, 43]");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o[0]; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// NOTE(review): the remainder of this test is missing from this chunk — the
// declaration below is cut off mid-token in the original text.
TEST(LeakNativeContextViaMapProto) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent
()); CHECK(code->IsCode()); HeapObject* obj = HeapObject::cast(*code); Address obj_addr = obj->address(); for (int i = 0; i < obj->Size(); i += kPointerSize) { Object* found = isolate->FindCodeObject(obj_addr + i); CHECK_EQ(*code, found); } Handle copy = isolate->factory()->NewCode( desc, Code::ComputeFlags(Code::STUB), Handle()); HeapObject* obj_copy = HeapObject::cast(*copy); Object* not_right = isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2); CHECK(not_right != *code); } TEST(HandleNull) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); HandleScope outer_scope(isolate); LocalContext context; Handle n(static_cast(nullptr), isolate); CHECK(!n.is_null()); } TEST(HeapObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); HandleScope sc(isolate); Handle value = factory->NewNumber(1.000123); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(1.000123, value->Number()); value = factory->NewNumber(1.0); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1.0, value->Number()); value = factory->NewNumberFromInt(1024); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1024.0, value->Number()); value = factory->NewNumberFromInt(Smi::kMinValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMinValue, Handle::cast(value)->value()); value = factory->NewNumberFromInt(Smi::kMaxValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMaxValue, Handle::cast(value)->value()); #if !defined(V8_TARGET_ARCH_64_BIT) // TODO(lrn): We need a NumberFromIntptr function in order to test this. 
value = factory->NewNumberFromInt(Smi::kMinValue - 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(Smi::kMinValue - 1), value->Number()); #endif value = factory->NewNumberFromUint(static_cast(Smi::kMaxValue) + 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(Smi::kMaxValue) + 1), value->Number()); value = factory->NewNumberFromUint(static_cast(1) << 31); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(1) << 31), value->Number()); // nan oddball checks CHECK(factory->nan_value()->IsNumber()); CHECK(std::isnan(factory->nan_value()->Number())); Handle s = factory->NewStringFromStaticChars("fisk hest "); CHECK(s->IsString()); CHECK_EQ(10, s->length()); Handle object_string = Handle::cast(factory->Object_string()); Handle global( CcTest::i_isolate()->context()->global_object()); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string)); // Check ToString for oddballs CheckOddball(isolate, heap->true_value(), "true"); CheckOddball(isolate, heap->false_value(), "false"); CheckOddball(isolate, heap->null_value(), "null"); CheckOddball(isolate, heap->undefined_value(), "undefined"); // Check ToString for Smis CheckSmi(isolate, 0, "0"); CheckSmi(isolate, 42, "42"); CheckSmi(isolate, -42, "-42"); // Check ToString for Numbers CheckNumber(isolate, 1.1, "1.1"); CheckFindCodeObject(isolate); } template static void CheckSimdValue(T* value, LANE_TYPE lane_values[LANES], LANE_TYPE other_value) { // Check against lane_values, and check that all lanes can be set to // other_value without disturbing the other lanes. 
for (int i = 0; i < LANES; i++) { CHECK_EQ(lane_values[i], value->get_lane(i)); } for (int i = 0; i < LANES; i++) { value->set_lane(i, other_value); // change the value for (int j = 0; j < LANES; j++) { if (i != j) CHECK_EQ(lane_values[j], value->get_lane(j)); else CHECK_EQ(other_value, value->get_lane(j)); } value->set_lane(i, lane_values[i]); // restore the lane } CHECK(value->BooleanValue()); // SIMD values are 'true'. } TEST(SimdObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Float32x4 { float lanes[4] = {1, 2, 3, 4}; float quiet_NaN = std::numeric_limits::quiet_NaN(); float signaling_NaN = std::numeric_limits::signaling_NaN(); Handle value = factory->NewFloat32x4(lanes); CHECK(value->IsFloat32x4()); CheckSimdValue(*value, lanes, 3.14f); // Check special lane values. value->set_lane(1, -0.0); CHECK_EQ(-0.0f, value->get_lane(1)); CHECK(std::signbit(value->get_lane(1))); // Sign bit should be preserved. value->set_lane(2, quiet_NaN); CHECK(std::isnan(value->get_lane(2))); value->set_lane(3, signaling_NaN); CHECK(std::isnan(value->get_lane(3))); #ifdef OBJECT_PRINT // Check value printing. { value = factory->NewFloat32x4(lanes); std::ostringstream os; value->Float32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); } { float special_lanes[4] = {0, -0.0, quiet_NaN, signaling_NaN}; value = factory->NewFloat32x4(special_lanes); std::ostringstream os; value->Float32x4Print(os); // Value printing doesn't preserve signed zeroes. 
CHECK_EQ("0, 0, NaN, NaN", os.str()); } #endif // OBJECT_PRINT } // Int32x4 { int32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewInt32x4(lanes); CHECK(value->IsInt32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Int32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Uint32x4 { uint32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewUint32x4(lanes); CHECK(value->IsUint32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Bool32x4 { bool lanes[4] = {true, false, true, false}; Handle value = factory->NewBool32x4(lanes); CHECK(value->IsBool32x4()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool32x4Print(os); CHECK_EQ("true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int16x8 { int16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewInt16x8(lanes); CHECK(value->IsInt16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Int16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Uint16x8 { uint16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewUint16x8(lanes); CHECK(value->IsUint16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Bool16x8 { bool lanes[8] = {true, false, true, false, true, false, true, false}; Handle value = factory->NewBool16x8(lanes); CHECK(value->IsBool16x8()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool16x8Print(os); CHECK_EQ("true, false, true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int8x16 { int8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; 
Handle value = factory->NewInt8x16(lanes); CHECK(value->IsInt8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Int8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Uint8x16 { uint8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; Handle value = factory->NewUint8x16(lanes); CHECK(value->IsUint8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Bool8x16 { bool lanes[16] = {true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}; Handle value = factory->NewBool8x16(lanes); CHECK(value->IsBool8x16()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool8x16Print(os); CHECK_EQ( "true, false, true, false, true, false, true, false, true, false, " "true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } } TEST(Tagging) { CcTest::InitializeVM(); int request = 24; CHECK_EQ(request, static_cast(OBJECT_POINTER_ALIGN(request))); CHECK(Smi::FromInt(42)->IsSmi()); CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi()); CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi()); } TEST(GarbageCollection) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Check GC. 
heap->CollectGarbage(NEW_SPACE); Handle global( CcTest::i_isolate()->context()->global_object()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle prop_namex = factory->InternalizeUtf8String("theSlotx"); Handle obj_name = factory->InternalizeUtf8String("theObject"); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); { HandleScope inner_scope(isolate); // Allocate a function and keep it in global object's property. Handle function = factory->NewFunction(name); JSReceiver::SetProperty(global, name, function, SLOPPY).Check(); // Allocate an object. Unrooted after leaving the scope. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(obj, prop_namex).ToHandleChecked()); } heap->CollectGarbage(NEW_SPACE); // Function should be alive. CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name)); // Check function is retained. Handle func_value = Object::GetProperty(global, name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); { HandleScope inner_scope(isolate); // Allocate another object, make it reachable from global. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); } // After gc, it should survive. 
heap->CollectGarbage(NEW_SPACE); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name)); Handle obj = Object::GetProperty(global, obj_name).ToHandleChecked(); CHECK(obj->IsJSObject()); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); } static void VerifyStringAllocation(Isolate* isolate, const char* string) { HandleScope scope(isolate); Handle s = isolate->factory()->NewStringFromUtf8( CStrVector(string)).ToHandleChecked(); CHECK_EQ(StrLength(string), s->length()); for (int index = 0; index < s->length(); index++) { CHECK_EQ(static_cast(string[index]), s->Get(index)); } } TEST(String) { CcTest::InitializeVM(); Isolate* isolate = reinterpret_cast(CcTest::isolate()); VerifyStringAllocation(isolate, "a"); VerifyStringAllocation(isolate, "ab"); VerifyStringAllocation(isolate, "abc"); VerifyStringAllocation(isolate, "abcd"); VerifyStringAllocation(isolate, "fiskerdrengen er paa havet"); } TEST(LocalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* name = "Kasper the spunky"; Handle string = factory->NewStringFromAsciiChecked(name); CHECK_EQ(StrLength(name), string->length()); } TEST(GlobalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); Handle h1; Handle h2; Handle h3; Handle h4; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); h3 = global_handles->Create(*i); h4 = global_handles->Create(*u); } // after gc, it should survive heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK((*h3)->IsString()); CHECK((*h4)->IsHeapNumber()); CHECK_EQ(*h3, *h1); 
GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h3.location()); CHECK_EQ(*h4, *h2); GlobalHandles::Destroy(h2.location()); GlobalHandles::Destroy(h4.location()); } static bool WeakPointerCleared = false; static void TestWeakGlobalHandleCallback( const v8::WeakCallbackInfo& data) { std::pair*, int>* p = reinterpret_cast*, int>*>( data.GetParameter()); if (p->second == 1234) WeakPointerCleared = true; p->first->Reset(); } TEST(WeakGlobalHandlesScavenge) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scavenge treats weak pointers as normal roots. heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK(!WeakPointerCleared); CHECK(!global_handles->IsNearDeath(h2.location())); CHECK(!global_handles->IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h2.location()); } TEST(WeakGlobalHandlesMark) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } // Make sure the objects are promoted. 
heap->CollectGarbage(OLD_SPACE); heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2)); std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); CHECK(!GlobalHandles::IsNearDeath(h1.location())); CHECK(!GlobalHandles::IsNearDeath(h2.location())); // Incremental marking potentially marked handles before they turned weak. heap->CollectAllGarbage(); CHECK((*h1)->IsString()); CHECK(WeakPointerCleared); CHECK(!GlobalHandles::IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); } TEST(DeleteWeakGlobalHandle) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); h = global_handles->Create(*i); } std::pair*, int> handle_and_id(&h, 1234); GlobalHandles::MakeWeak(h.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scanvenge does not recognize weak reference. heap->CollectGarbage(NEW_SPACE); CHECK(!WeakPointerCleared); // Mark-compact treats weak reference properly. 
heap->CollectGarbage(OLD_SPACE); CHECK(WeakPointerCleared); } TEST(DoNotPromoteWhiteObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle white = factory->NewStringFromStaticChars("white"); CHECK(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*white)))); heap->CollectGarbage(NEW_SPACE); CHECK(heap->InNewSpace(*white)); } TEST(PromoteGreyOrBlackObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle marked = factory->NewStringFromStaticChars("marked"); IncrementalMarking* marking = heap->incremental_marking(); marking->Stop(); heap->StartIncrementalMarking(); while (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*marked)))) { marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD, IncrementalMarking::FORCE_MARKING, IncrementalMarking::DO_NOT_FORCE_COMPLETION); } heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*marked)); } TEST(BytecodeArray) { static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a}; static const int kRawBytesSize = sizeof(kRawBytes); static const int kFrameSize = 32; static const int kParameterCount = 2; i::FLAG_manual_evacuation_candidates_selection = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); heap::SimulateFullSpace(heap->old_space()); Handle constant_pool = factory->NewFixedArray(5, TENURED); for (int i = 0; i < 5; i++) { Handle number = factory->NewHeapNumber(i); constant_pool->set(i, *number); } // Allocate and initialize BytecodeArray Handle array = factory->NewBytecodeArray( kRawBytesSize, kRawBytes, kFrameSize, kParameterCount, constant_pool); CHECK(array->IsBytecodeArray()); CHECK_EQ(array->length(), 
(int)sizeof(kRawBytes)); CHECK_EQ(array->frame_size(), kFrameSize); CHECK_EQ(array->parameter_count(), kParameterCount); CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_LE(array->address(), array->GetFirstBytecodeAddress()); CHECK_GE(array->address() + array->BytecodeArraySize(), array->GetFirstBytecodeAddress() + array->length()); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); CHECK_EQ(array->get(i), kRawBytes[i]); } FixedArray* old_constant_pool_address = *constant_pool; // Perform a full garbage collection and force the constant pool to be on an // evacuation candidate. Page* evac_page = Page::FromAddress(constant_pool->address()); evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); heap->CollectAllGarbage(); // BytecodeArray should survive. CHECK_EQ(array->length(), kRawBytesSize); CHECK_EQ(array->frame_size(), kFrameSize); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->get(i), kRawBytes[i]); CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); } // Constant pool should have been migrated. 
CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_NE(array->constant_pool(), old_constant_pool_address); } static const char* not_so_random_string_table[] = { "abstract", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "var", "void", "volatile", "while", "with", 0 }; static void CheckInternalizedStrings(const char** strings) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); for (const char* string = *strings; *strings != 0; string = *strings++) { HandleScope scope(isolate); Handle a = isolate->factory()->InternalizeUtf8String(CStrVector(string)); // InternalizeUtf8String may return a failure if a GC is needed. 
CHECK(a->IsInternalizedString()); Handle b = factory->InternalizeUtf8String(string); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); b = isolate->factory()->InternalizeUtf8String(CStrVector(string)); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); } } TEST(StringTable) { CcTest::InitializeVM(); v8::HandleScope sc(CcTest::isolate()); CheckInternalizedStrings(not_so_random_string_table); CheckInternalizedStrings(not_so_random_string_table); } TEST(FunctionAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check that we can add properties to function objects. 
JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(function, prop_name).ToHandleChecked()); } TEST(ObjectProperties) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); // check for empty CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); // delete first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete first and then second CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); 
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete second and then first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // check string and internalized string match const char* string1 = "fisk"; Handle s1 = factory->NewStringFromAsciiChecked(string1); JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check(); Handle s1_string = factory->InternalizeUtf8String(string1); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string)); // check internalized string and string match const char* string2 = "fugl"; Handle s2_string = factory->InternalizeUtf8String(string2); JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check(); Handle s2 = factory->NewStringFromAsciiChecked(string2); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2)); } TEST(JSObjectMaps) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); Handle initial_map(function->initial_map()); // Set a propery Handle twenty_three(Smi::FromInt(23), isolate); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check the map has changed CHECK(*initial_map != obj->map()); } TEST(JSArray) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("Array"); Handle fun_obj = 
Object::GetProperty( CcTest::i_isolate()->global_object(), name).ToHandleChecked(); Handle function = Handle::cast(fun_obj); // Allocate the object. Handle element; Handle object = factory->NewJSObject(function); Handle array = Handle::cast(object); // We just initialized the VM, no heap allocation failure yet. JSArray::Initialize(array, 0); // Set array length to 0. JSArray::SetLength(array, 0); CHECK_EQ(Smi::FromInt(0), array->length()); // Must be in fast mode. CHECK(array->HasFastSmiOrObjectElements()); // array[length] = name. JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(1), array->length()); element = i::Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); // Set array length with larger than smi value. JSArray::SetLength(array, static_cast(Smi::kMaxValue) + 1); uint32_t int_length = 0; CHECK(array->length()->ToArrayIndex(&int_length)); CHECK_EQ(static_cast(Smi::kMaxValue) + 1, int_length); CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. 
JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check(); uint32_t new_int_length = 0; CHECK(array->length()->ToArrayIndex(&new_int_length)); CHECK_EQ(static_cast(int_length), new_int_length - 1); element = Object::GetElement(isolate, array, int_length).ToHandleChecked(); CHECK_EQ(*element, *name); element = Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); } TEST(JSObjectCopy) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check(); // Make the clone. 
Handle value1, value2; Handle clone = factory->CopyJSObject(obj); CHECK(!clone.is_identical_to(obj)); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); // Flip the values. JSReceiver::SetProperty(clone, first, two, SLOPPY).Check(); JSReceiver::SetProperty(clone, second, one, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check(); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); } TEST(StringAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 }; for (int length = 0; length < 100; length++) { v8::HandleScope scope(CcTest::isolate()); char* non_one_byte = NewArray(3 * length + 1); char* one_byte = NewArray(length + 1); non_one_byte[3 * length] = 0; 
one_byte[length] = 0; for (int i = 0; i < length; i++) { one_byte[i] = 'a'; non_one_byte[3 * i] = chars[0]; non_one_byte[3 * i + 1] = chars[1]; non_one_byte[3 * i + 2] = chars[2]; } Handle non_one_byte_sym = factory->InternalizeUtf8String( Vector(non_one_byte, 3 * length)); CHECK_EQ(length, non_one_byte_sym->length()); Handle one_byte_sym = factory->InternalizeOneByteString(OneByteVector(one_byte, length)); CHECK_EQ(length, one_byte_sym->length()); Handle non_one_byte_str = factory->NewStringFromUtf8(Vector(non_one_byte, 3 * length)) .ToHandleChecked(); non_one_byte_str->Hash(); CHECK_EQ(length, non_one_byte_str->length()); Handle one_byte_str = factory->NewStringFromUtf8(Vector(one_byte, length)) .ToHandleChecked(); one_byte_str->Hash(); CHECK_EQ(length, one_byte_str->length()); DeleteArray(non_one_byte); DeleteArray(one_byte); } } static int ObjectsFoundInHeap(Heap* heap, Handle objs[], int size) { // Count the number of objects found in the heap. int found_count = 0; HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { for (int i = 0; i < size; i++) { if (*objs[i] == obj) { found_count++; } } } return found_count; } TEST(Iteration) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); // Array of objects to scan haep for. const int objs_count = 6; Handle objs[objs_count]; int next_objs_index = 0; // Allocate a JS array to OLD_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewJSArray(10); objs[next_objs_index++] = factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, TENURED); // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij"); objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij", TENURED); // Allocate a large string (for large object space). 
int large_size = Page::kMaxRegularHeapObjectSize + 1; char* str = new char[large_size]; for (int i = 0; i < large_size - 1; ++i) str[i] = 'a'; str[large_size - 1] = '\0'; objs[next_objs_index++] = factory->NewStringFromAsciiChecked(str, TENURED); delete[] str; // Add a Map object to look for. objs[next_objs_index++] = Handle(HeapObject::cast(*objs[0])->map()); CHECK_EQ(objs_count, next_objs_index); CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count)); } UNINITIALIZED_TEST(TestCodeFlushing) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); i::Isolate* i_isolate = reinterpret_cast(isolate); isolate->Enter(); Factory* factory = i_isolate->factory(); { v8::HandleScope scope(isolate); v8::Context::New(isolate)->Enter(); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(isolate); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(i_isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. i_isolate->heap()->CollectAllGarbage(); i_isolate->heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. 
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { i_isolate->heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } isolate->Exit(); isolate->Dispose(); } TEST(TestCodeFlushingPreAged) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // Compile foo, but don't run it. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code has been run so will survive at least one GC. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // The code was only run once, so it should be pre-aged and collected on the // next GC. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); // Execute the function again twice, and ensure it is reset to the young age. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();" "foo();"); } // The code will survive at least two GC now that it is young again. 
CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { CcTest::heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } TEST(TestCodeFlushingIncremental) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use incremental marking. 
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); CcTest::heap()->CollectAllGarbage(); } CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // This compile will compile the function again. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();"); } // Simulate several GCs that use incremental marking but make sure // the loop breaks once the function is enqueued as a candidate. for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); if (!function->next_function_link()->IsUndefined(CcTest::i_isolate())) break; CcTest::heap()->CollectAllGarbage(); } // Force optimization while incremental marking is active and while // the function is enqueued as a candidate. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled() || !function->IsOptimized()); CHECK(function->is_compiled() || !function->IsOptimized()); } TEST(TestCodeFlushingIncrementalScavenge) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "var foo = function() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo();" "var bar = function() {" " var x = 23;" "};" "bar();"; Handle foo_name = factory->InternalizeUtf8String("foo"); Handle bar_name = factory->InternalizeUtf8String("bar"); // Perfrom one initial GC to enable code flushing. CcTest::heap()->CollectAllGarbage(); // This compile will add the code to the compilation cache. 
{ v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check functions are compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); Handle func_value2 = Object::GetProperty(isolate->global_object(), bar_name).ToHandleChecked(); CHECK(func_value2->IsJSFunction()); Handle function2 = Handle::cast(func_value2); CHECK(function2->shared()->is_compiled()); // Clear references to functions so that one of them can die. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo = 0; bar = 0;"); } // Bump the code age so that flushing is triggered while the function // object is still located in new-space. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); function2->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the functions are enqueued as // code flushing candidates. Then kill one of the functions. Finally // perform a scavenge while incremental marking is still running. heap::SimulateIncrementalMarking(CcTest::heap()); *function2.location() = NULL; CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking"); // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); } TEST(TestCodeFlushingIncrementalAbort) { // If we do not flush code this test is invalid. 
if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. heap->CollectAllGarbage(); heap->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Bump the code age so that flushing is triggered. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the function is enqueued as // code flushing candidate. heap::SimulateIncrementalMarking(heap); // Enable the debugger and add a breakpoint while incremental marking // is running so that incremental marking aborts and code flushing is // disabled. int position = 0; Handle breakpoint_object(Smi::FromInt(0), isolate); EnableDebugger(CcTest::isolate()); isolate->debug()->SetBreakPoint(function, breakpoint_object, &position); isolate->debug()->ClearAllBreakPoints(); DisableDebugger(CcTest::isolate()); // Force optimization now that code flushing is disabled. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. 
heap->CollectAllGarbage();
  CHECK(function->shared()->is_compiled() || !function->IsOptimized());
  CHECK(function->is_compiled() || !function->IsOptimized());
}


// The CompileLazy built-in must use the incremental write barrier when it
// installs already-optimized code on a closure during incremental marking.
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
  // Turn off always_opt because it interferes with running the built-in for
  // the last call to g().
  i::FLAG_always_opt = false;
  i::FLAG_allow_natives_syntax = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  v8::HandleScope scope(CcTest::isolate());

  CompileRun(
      "function make_closure(x) {"
      " return function() { return x + 3 };"
      "}"
      "var f = make_closure(5); f();"
      "var g = make_closure(5);");

  // Check f is compiled.
  Handle<String> f_name = factory->InternalizeUtf8String("f");
  Handle<Object> f_value =
      Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked();
  Handle<JSFunction> f_function = Handle<JSFunction>::cast(f_value);
  CHECK(f_function->is_compiled());

  // Check g is not compiled.
  Handle<String> g_name = factory->InternalizeUtf8String("g");
  Handle<Object> g_value =
      Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
  Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
  CHECK(!g_function->is_compiled());

  heap::SimulateIncrementalMarking(heap);
  CompileRun("%OptimizeFunctionOnNextCall(f); f();");

  // g should now have available an optimized function, unmarked by gc. The
  // CompileLazy built-in will discover it and install it in the closure, and
  // the incremental write barrier should be used.
  CompileRun("g();");
  CHECK(g_function->is_compiled());
}


TEST(CompilationCacheCachingBehavior) {
  // If we do not flush code, or have the compilation cache turned off, this
  // test is invalid.
if (!FLAG_flush_code || !FLAG_compilation_cache) { return; } CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); CompilationCache* compilation_cache = isolate->compilation_cache(); LanguageMode language_mode = construct_language_mode(FLAG_use_strict); v8::HandleScope scope(CcTest::isolate()); const char* raw_source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle source = factory->InternalizeUtf8String(raw_source); Handle native_context = isolate->native_context(); { v8::HandleScope scope(CcTest::isolate()); CompileRun(raw_source); } // The script should be in the cache now. MaybeHandle info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); // Check that the code cache entry survives at least on GC. // (Unless --optimize-for-size, in which case it might get collected // immediately.) if (!FLAG_optimize_for_size) { heap->CollectAllGarbage(); info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); } // Progress code age until it's old and ready for GC. while (!info.ToHandleChecked()->code()->IsOld()) { // To guarantee progress, we have to MakeOlder with different parities. // We can't just use NO_MARKING_PARITY, since e.g. kExecutedOnceCodeAge is // always NO_MARKING_PARITY and the code age only progresses if the parity // is different. info.ToHandleChecked()->code()->MakeOlder(ODD_MARKING_PARITY); info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY); } heap->CollectAllGarbage(); // Ensure code aging cleared the entry from the cache. 
info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(info.is_null()); } static void OptimizeEmptyFunction(const char* name) { HandleScope scope(CcTest::i_isolate()); EmbeddedVector source; SNPrintF(source, "function %s() { return 0; }" "%s(); %s();" "%%OptimizeFunctionOnNextCall(%s);" "%s();", name, name, name, name, name); CompileRun(source.start()); } // Count the number of native contexts in the weak list of native contexts. int CountNativeContexts() { int count = 0; Object* object = CcTest::heap()->native_contexts_list(); while (!object->IsUndefined(CcTest::i_isolate())) { count++; object = Context::cast(object)->next_context_link(); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context. static int CountOptimizedUserFunctions(v8::Local context) { int count = 0; Handle icontext = v8::Utils::OpenHandle(*context); Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST); while (object->IsJSFunction() && !JSFunction::cast(object)->shared()->IsBuiltin()) { count++; object = JSFunction::cast(object)->next_function_link(); } return count; } TEST(TestInternalWeakLists) { FLAG_always_opt = false; FLAG_allow_natives_syntax = true; v8::V8::Initialize(); // Some flags turn Scavenge collections into Mark-sweep collections // and hence are incompatible with this test case. if (FLAG_gc_global || FLAG_stress_compaction) return; FLAG_retain_maps_for_n_gc = 0; static const int kNumTestContexts = 10; Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); HandleScope scope(isolate); v8::Local ctx[kNumTestContexts]; if (!isolate->use_crankshaft()) return; CHECK_EQ(0, CountNativeContexts()); // Create a number of global contests which gets linked together. 
for (int i = 0; i < kNumTestContexts; i++) { ctx[i] = v8::Context::New(CcTest::isolate()); // Collect garbage that might have been created by one of the // installed extensions. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(i + 1, CountNativeContexts()); ctx[i]->Enter(); // Create a handle scope so no function objects get stuck in the outer // handle scope. HandleScope scope(isolate); CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f1"); CHECK_EQ(1, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f2"); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f3"); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f4"); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f5"); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); // Remove function f1, and CompileRun("f1=null"); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); } // Mark compact handles the weak references. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); // Get rid of f3 and f5 in the same way. CompileRun("f3=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); CompileRun("f5=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); ctx[i]->Exit(); } // Force compilation cache cleanup. CcTest::heap()->NotifyContextDisposed(true); CcTest::heap()->CollectAllGarbage(); // Dispose the native contexts one by one. 
for (int i = 0; i < kNumTestContexts; i++) { // TODO(dcarney): is there a better way to do this? i::Object** unsafe = reinterpret_cast(*ctx[i]); *unsafe = CcTest::heap()->undefined_value(); ctx[i].Clear(); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(i::NEW_SPACE); CHECK_EQ(kNumTestContexts - i, CountNativeContexts()); } // Mark compact handles the weak references. CcTest::heap()->CollectAllGarbage(); CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts()); } CHECK_EQ(0, CountNativeContexts()); } // Count the number of native contexts in the weak list of native contexts // causing a GC after the specified number of elements. static int CountNativeContextsWithGC(Isolate* isolate, int n) { Heap* heap = isolate->heap(); int count = 0; Handle object(heap->native_contexts_list(), isolate); while (!object->IsUndefined(isolate)) { count++; if (count == n) heap->CollectAllGarbage(); object = Handle(Context::cast(*object)->next_context_link(), isolate); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context causing a GC after the // specified number of elements. 
static int CountOptimizedUserFunctionsWithGC(v8::Local context, int n) { int count = 0; Handle icontext = v8::Utils::OpenHandle(*context); Isolate* isolate = icontext->GetIsolate(); Handle object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST), isolate); while (object->IsJSFunction() && !Handle::cast(object)->shared()->IsBuiltin()) { count++; if (count == n) isolate->heap()->CollectAllGarbage(); object = Handle( Object::cast(JSFunction::cast(*object)->next_function_link()), isolate); } return count; } TEST(TestInternalWeakListsTraverseWithGC) { FLAG_always_opt = false; FLAG_allow_natives_syntax = true; v8::V8::Initialize(); static const int kNumTestContexts = 10; Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); v8::Local ctx[kNumTestContexts]; if (!isolate->use_crankshaft()) return; CHECK_EQ(0, CountNativeContexts()); // Create an number of contexts and check the length of the weak list both // with and without GCs while iterating the list. for (int i = 0; i < kNumTestContexts; i++) { ctx[i] = v8::Context::New(CcTest::isolate()); CHECK_EQ(i + 1, CountNativeContexts()); CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1)); } ctx[0]->Enter(); // Compile a number of functions the length of the weak list of optimized // functions both with and without GCs while iterating the list. 
CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0])); OptimizeEmptyFunction("f1"); CHECK_EQ(1, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(1, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f2"); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(2, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f3"); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(3, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f4"); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(4, CountOptimizedUserFunctionsWithGC(ctx[0], 2)); OptimizeEmptyFunction("f5"); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(5, CountOptimizedUserFunctionsWithGC(ctx[0], 4)); ctx[0]->Exit(); } TEST(TestSizeOfRegExpCode) { if (!FLAG_regexp_optimization) return; v8::V8::Initialize(); Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); LocalContext context; // Adjust source below and this check to match // RegExpImple::kRegExpTooLargeToOptimize. CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB); // Compile a regexp that is much larger if we are using regexp optimizations. CompileRun( "var reg_exp_source = '(?:a|bc|def|ghij|klmno|pqrstu)';" "var half_size_reg_exp;" "while (reg_exp_source.length < 20 * 1024) {" " half_size_reg_exp = reg_exp_source;" " reg_exp_source = reg_exp_source + reg_exp_source;" "}" // Flatten string. "reg_exp_source.match(/f/);"); // Get initial heap size after several full GCs, which will stabilize // the heap size and return with sweeping finished completely. 
CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector(); if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } int initial_size = static_cast(CcTest::heap()->SizeOfObjects()); CompileRun("'foo'.match(reg_exp_source);"); CcTest::heap()->CollectAllGarbage(); int size_with_regexp = static_cast(CcTest::heap()->SizeOfObjects()); CompileRun("'foo'.match(half_size_reg_exp);"); CcTest::heap()->CollectAllGarbage(); int size_with_optimized_regexp = static_cast(CcTest::heap()->SizeOfObjects()); int size_of_regexp_code = size_with_regexp - initial_size; // On some platforms the debug-code flag causes huge amounts of regexp code // to be emitted, breaking this test. if (!FLAG_debug_code) { CHECK_LE(size_of_regexp_code, 1 * MB); } // Small regexp is half the size, but compiles to more than twice the code // due to the optimization steps. CHECK_GE(size_with_optimized_regexp, size_with_regexp + size_of_regexp_code * 2); } HEAP_TEST(TestSizeOfObjects) { v8::V8::Initialize(); // Get initial heap size after several full GCs, which will stabilize // the heap size and return with sweeping finished completely. CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector(); if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } int initial_size = static_cast(CcTest::heap()->SizeOfObjects()); { // Allocate objects on several different old-space pages so that // concurrent sweeper threads will be busy sweeping the old space on // subsequent GC runs. 
AlwaysAllocateScope always_allocate(CcTest::i_isolate()); int filler_size = static_cast(FixedArray::SizeFor(8192)); for (int i = 1; i <= 100; i++) { CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked(); CHECK_EQ(initial_size + i * filler_size, static_cast(CcTest::heap()->SizeOfObjects())); } } // The heap size should go back to initial size after a full GC, even // though sweeping didn't finish yet. CcTest::heap()->CollectAllGarbage(); // Normally sweeping would not be complete here, but no guarantees. CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects())); // Waiting for sweeper threads should not change heap size. if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects())); } TEST(TestAlignmentCalculations) { // Maximum fill amounts are consistent. int maximum_double_misalignment = kDoubleSize - kPointerSize; int maximum_simd128_misalignment = kSimd128Size - kPointerSize; int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned); CHECK_EQ(0, max_word_fill); int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned); CHECK_EQ(maximum_double_misalignment, max_double_fill); int max_double_unaligned_fill = Heap::GetMaximumFillToAlign(kDoubleUnaligned); CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill); int max_simd128_unaligned_fill = Heap::GetMaximumFillToAlign(kSimd128Unaligned); CHECK_EQ(maximum_simd128_misalignment, max_simd128_unaligned_fill); Address base = static_cast(NULL); int fill = 0; // Word alignment never requires fill. fill = Heap::GetFillToAlign(base, kWordAligned); CHECK_EQ(0, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned); CHECK_EQ(0, fill); // No fill is required when address is double aligned. fill = Heap::GetFillToAlign(base, kDoubleAligned); CHECK_EQ(0, fill); // Fill is required if address is not double aligned. 
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned); CHECK_EQ(maximum_double_misalignment, fill); // kDoubleUnaligned has the opposite fill amounts. fill = Heap::GetFillToAlign(base, kDoubleUnaligned); CHECK_EQ(maximum_double_misalignment, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned); CHECK_EQ(0, fill); // 128 bit SIMD types have 2 or 4 possible alignments, depending on platform. fill = Heap::GetFillToAlign(base, kSimd128Unaligned); CHECK_EQ((3 * kPointerSize) & kSimd128AlignmentMask, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kSimd128Unaligned); CHECK_EQ((2 * kPointerSize) & kSimd128AlignmentMask, fill); fill = Heap::GetFillToAlign(base + 2 * kPointerSize, kSimd128Unaligned); CHECK_EQ(kPointerSize, fill); fill = Heap::GetFillToAlign(base + 3 * kPointerSize, kSimd128Unaligned); CHECK_EQ(0, fill); } static HeapObject* NewSpaceAllocateAligned(int size, AllocationAlignment alignment) { Heap* heap = CcTest::heap(); AllocationResult allocation = heap->new_space()->AllocateRawAligned(size, alignment); HeapObject* obj = NULL; allocation.To(&obj); heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo); return obj; } // Get new space allocation into the desired alignment. static Address AlignNewSpace(AllocationAlignment alignment, int offset) { Address* top_addr = CcTest::heap()->new_space()->allocation_top_address(); int fill = Heap::GetFillToAlign(*top_addr, alignment); if (fill) { NewSpaceAllocateAligned(fill + offset, kWordAligned); } return *top_addr; } TEST(TestAlignedAllocation) { // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. const intptr_t double_misalignment = kDoubleSize - kPointerSize; Address* top_addr = CcTest::heap()->new_space()->allocation_top_address(); Address start; HeapObject* obj; HeapObject* filler; if (double_misalignment) { // Allocate a pointer sized object that must be double aligned at an // aligned address. 
start = AlignNewSpace(kDoubleAligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); // There is no filler. CHECK_EQ(kPointerSize, *top_addr - start); // Allocate a second pointer sized object that must be double aligned at an // unaligned address. start = AlignNewSpace(kDoubleAligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start); // Similarly for kDoubleUnaligned. start = AlignNewSpace(kDoubleUnaligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); CHECK_EQ(kPointerSize, *top_addr - start); start = AlignNewSpace(kDoubleUnaligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start); } // Now test SIMD alignment. There are 2 or 4 possible alignments, depending // on platform. start = AlignNewSpace(kSimd128Unaligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is no filler. CHECK_EQ(kPointerSize, *top_addr - start); start = AlignNewSpace(kSimd128Unaligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. 
filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kSimd128Size - kPointerSize); CHECK_EQ(kPointerSize + kSimd128Size - kPointerSize, *top_addr - start); if (double_misalignment) { // Test the 2 other alignments possible on 32 bit platforms. start = AlignNewSpace(kSimd128Unaligned, 2 * kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == 2 * kPointerSize); CHECK_EQ(kPointerSize + 2 * kPointerSize, *top_addr - start); start = AlignNewSpace(kSimd128Unaligned, 3 * kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + kPointerSize, *top_addr - start); } } static HeapObject* OldSpaceAllocateAligned(int size, AllocationAlignment alignment) { Heap* heap = CcTest::heap(); AllocationResult allocation = heap->old_space()->AllocateRawAligned(size, alignment); HeapObject* obj = NULL; allocation.To(&obj); heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo); return obj; } // Get old space allocation into the desired alignment. static Address AlignOldSpace(AllocationAlignment alignment, int offset) { Address* top_addr = CcTest::heap()->old_space()->allocation_top_address(); int fill = Heap::GetFillToAlign(*top_addr, alignment); int allocation = fill + offset; if (allocation) { OldSpaceAllocateAligned(allocation, kWordAligned); } Address top = *top_addr; // Now force the remaining allocation onto the free list. 
CcTest::heap()->old_space()->EmptyAllocationInfo(); return top; } // Test the case where allocation must be done from the free list, so filler // may precede or follow the object. TEST(TestAlignedOverAllocation) { // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. const intptr_t double_misalignment = kDoubleSize - kPointerSize; Address start; HeapObject* obj; HeapObject* filler1; HeapObject* filler2; if (double_misalignment) { start = AlignOldSpace(kDoubleAligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. start = AlignOldSpace(kDoubleAligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1); CHECK(filler1->IsFiller()); CHECK(filler1->Size() == kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Similarly for kDoubleUnaligned. start = AlignOldSpace(kDoubleUnaligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. 
start = AlignOldSpace(kDoubleUnaligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); } // Now test SIMD alignment. There are 2 or 4 possible alignments, depending // on platform. start = AlignOldSpace(kSimd128Unaligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object after the object. filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kSimd128Size - kPointerSize); start = AlignOldSpace(kSimd128Unaligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kSimd128Size - kPointerSize); if (double_misalignment) { // Test the 2 other alignments possible on 32 bit platforms. start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There are filler objects before and after the object. 
filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == 2 * kPointerSize); filler2 = HeapObject::FromAddress(start + 3 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == kPointerSize); start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There are filler objects before and after the object. filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); filler2 = HeapObject::FromAddress(start + 2 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == 2 * kPointerSize); } } TEST(TestSizeOfObjectsVsHeapIteratorPrecision) { CcTest::InitializeVM(); HeapIterator iterator(CcTest::heap()); intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects(); intptr_t size_of_objects_2 = 0; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (!obj->IsFreeSpace()) { size_of_objects_2 += obj->Size(); } } // Delta must be within 5% of the larger result. // TODO(gc): Tighten this up by distinguishing between byte // arrays that are real and those that merely mark free space // on the heap. if (size_of_objects_1 > size_of_objects_2) { intptr_t delta = size_of_objects_1 - size_of_objects_2; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_1 / 20, delta); } else { intptr_t delta = size_of_objects_2 - size_of_objects_1; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_2 / 20, delta); } } static void FillUpNewSpace(NewSpace* new_space) { // Fill up new space to the point that it is completely full. 
Make sure // that the scavenger does not undo the filling. Heap* heap = new_space->heap(); Isolate* isolate = heap->isolate(); Factory* factory = isolate->factory(); HandleScope scope(isolate); AlwaysAllocateScope always_allocate(isolate); intptr_t available = new_space->Capacity() - new_space->Size(); intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1; for (intptr_t i = 0; i < number_of_fillers; i++) { CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED))); } } TEST(GrowAndShrinkNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); NewSpace* new_space = heap->new_space(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } // Explicitly growing should double the space capacity. intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); old_capacity = new_space->TotalCapacity(); FillUpNewSpace(new_space); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Explicitly shrinking should not affect space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Let the scavenger empty the new space. heap->CollectGarbage(NEW_SPACE); CHECK_LE(new_space->Size(), old_capacity); // Explicitly shrinking should halve the space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == 2 * new_capacity); // Consecutive shrinking should not affect space capacity. 
old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_space->Shrink(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } TEST(CollectingAllAvailableGarbageShrinksNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } v8::HandleScope scope(CcTest::isolate()); NewSpace* new_space = heap->new_space(); intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); FillUpNewSpace(new_space); heap->CollectAllAvailableGarbage(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } static int NumberOfGlobalObjects() { int count = 0; HeapIterator iterator(CcTest::heap()); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsJSGlobalObject()) count++; } return count; } // Test that we don't embed maps from foreign contexts into // optimized code. 
// Optimizes a load from an object belonging to context 1 while running in
// context 2, then drops context 1 and checks (via global-object counting)
// that the optimized code did not keep context 1's map alive.
TEST(LeakNativeContextViaMap) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = {x: 42}");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o.x; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Test that we don't embed functions from foreign contexts into
// optimized code.
// Same leak check as LeakNativeContextViaMap, but the cross-context value is
// a function that gets inlined/called by the optimized code.
TEST(LeakNativeContextViaFunction) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = function() { return 42; }");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f(x) { return x(); }"
        "for (var i = 0; i < 10; ++i) f(o);"
        "%OptimizeFunctionOnNextCall(f);"
        "f(o);");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Same leak check, but the optimized code performs a keyed (elements) load
// from the foreign-context array.
TEST(LeakNativeContextViaMapKeyed) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = [42, 43]");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o[0]; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// NOTE(review): the chunk is cut off inside this test; the source text ends
// mid-declaration here and the following chunk is unrelated duplicated text.
TEST(LeakNativeContextViaMapProto) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent
copy = isolate->factory()->NewCode( desc, Code::ComputeFlags(Code::STUB), Handle()); HeapObject* obj_copy = HeapObject::cast(*copy); Object* not_right = isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2); CHECK(not_right != *code); } TEST(HandleNull) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); HandleScope outer_scope(isolate); LocalContext context; Handle n(static_cast(nullptr), isolate); CHECK(!n.is_null()); } TEST(HeapObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); HandleScope sc(isolate); Handle value = factory->NewNumber(1.000123); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(1.000123, value->Number()); value = factory->NewNumber(1.0); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1.0, value->Number()); value = factory->NewNumberFromInt(1024); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(1024.0, value->Number()); value = factory->NewNumberFromInt(Smi::kMinValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMinValue, Handle::cast(value)->value()); value = factory->NewNumberFromInt(Smi::kMaxValue); CHECK(value->IsSmi()); CHECK(value->IsNumber()); CHECK_EQ(Smi::kMaxValue, Handle::cast(value)->value()); #if !defined(V8_TARGET_ARCH_64_BIT) // TODO(lrn): We need a NumberFromIntptr function in order to test this. 
value = factory->NewNumberFromInt(Smi::kMinValue - 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(Smi::kMinValue - 1), value->Number()); #endif value = factory->NewNumberFromUint(static_cast(Smi::kMaxValue) + 1); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(Smi::kMaxValue) + 1), value->Number()); value = factory->NewNumberFromUint(static_cast(1) << 31); CHECK(value->IsHeapNumber()); CHECK(value->IsNumber()); CHECK_EQ(static_cast(static_cast(1) << 31), value->Number()); // nan oddball checks CHECK(factory->nan_value()->IsNumber()); CHECK(std::isnan(factory->nan_value()->Number())); Handle s = factory->NewStringFromStaticChars("fisk hest "); CHECK(s->IsString()); CHECK_EQ(10, s->length()); Handle object_string = Handle::cast(factory->Object_string()); Handle global( CcTest::i_isolate()->context()->global_object()); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string)); // Check ToString for oddballs CheckOddball(isolate, heap->true_value(), "true"); CheckOddball(isolate, heap->false_value(), "false"); CheckOddball(isolate, heap->null_value(), "null"); CheckOddball(isolate, heap->undefined_value(), "undefined"); // Check ToString for Smis CheckSmi(isolate, 0, "0"); CheckSmi(isolate, 42, "42"); CheckSmi(isolate, -42, "-42"); // Check ToString for Numbers CheckNumber(isolate, 1.1, "1.1"); CheckFindCodeObject(isolate); } template static void CheckSimdValue(T* value, LANE_TYPE lane_values[LANES], LANE_TYPE other_value) { // Check against lane_values, and check that all lanes can be set to // other_value without disturbing the other lanes. 
for (int i = 0; i < LANES; i++) { CHECK_EQ(lane_values[i], value->get_lane(i)); } for (int i = 0; i < LANES; i++) { value->set_lane(i, other_value); // change the value for (int j = 0; j < LANES; j++) { if (i != j) CHECK_EQ(lane_values[j], value->get_lane(j)); else CHECK_EQ(other_value, value->get_lane(j)); } value->set_lane(i, lane_values[i]); // restore the lane } CHECK(value->BooleanValue()); // SIMD values are 'true'. } TEST(SimdObjects) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Float32x4 { float lanes[4] = {1, 2, 3, 4}; float quiet_NaN = std::numeric_limits::quiet_NaN(); float signaling_NaN = std::numeric_limits::signaling_NaN(); Handle value = factory->NewFloat32x4(lanes); CHECK(value->IsFloat32x4()); CheckSimdValue(*value, lanes, 3.14f); // Check special lane values. value->set_lane(1, -0.0); CHECK_EQ(-0.0f, value->get_lane(1)); CHECK(std::signbit(value->get_lane(1))); // Sign bit should be preserved. value->set_lane(2, quiet_NaN); CHECK(std::isnan(value->get_lane(2))); value->set_lane(3, signaling_NaN); CHECK(std::isnan(value->get_lane(3))); #ifdef OBJECT_PRINT // Check value printing. { value = factory->NewFloat32x4(lanes); std::ostringstream os; value->Float32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); } { float special_lanes[4] = {0, -0.0, quiet_NaN, signaling_NaN}; value = factory->NewFloat32x4(special_lanes); std::ostringstream os; value->Float32x4Print(os); // Value printing doesn't preserve signed zeroes. 
CHECK_EQ("0, 0, NaN, NaN", os.str()); } #endif // OBJECT_PRINT } // Int32x4 { int32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewInt32x4(lanes); CHECK(value->IsInt32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Int32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Uint32x4 { uint32_t lanes[4] = {1, 2, 3, 4}; Handle value = factory->NewUint32x4(lanes); CHECK(value->IsUint32x4()); CheckSimdValue(*value, lanes, 3); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint32x4Print(os); CHECK_EQ("1, 2, 3, 4", os.str()); #endif // OBJECT_PRINT } // Bool32x4 { bool lanes[4] = {true, false, true, false}; Handle value = factory->NewBool32x4(lanes); CHECK(value->IsBool32x4()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool32x4Print(os); CHECK_EQ("true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int16x8 { int16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewInt16x8(lanes); CHECK(value->IsInt16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Int16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Uint16x8 { uint16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8}; Handle value = factory->NewUint16x8(lanes); CHECK(value->IsUint16x8()); CheckSimdValue(*value, lanes, 32767); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint16x8Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str()); #endif // OBJECT_PRINT } // Bool16x8 { bool lanes[8] = {true, false, true, false, true, false, true, false}; Handle value = factory->NewBool16x8(lanes); CHECK(value->IsBool16x8()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool16x8Print(os); CHECK_EQ("true, false, true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } // Int8x16 { int8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; 
Handle value = factory->NewInt8x16(lanes); CHECK(value->IsInt8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Int8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Uint8x16 { uint8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; Handle value = factory->NewUint8x16(lanes); CHECK(value->IsUint8x16()); CheckSimdValue(*value, lanes, 127); #ifdef OBJECT_PRINT std::ostringstream os; value->Uint8x16Print(os); CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str()); #endif // OBJECT_PRINT } // Bool8x16 { bool lanes[16] = {true, false, true, false, true, false, true, false, true, false, true, false, true, false, true, false}; Handle value = factory->NewBool8x16(lanes); CHECK(value->IsBool8x16()); CheckSimdValue(*value, lanes, false); #ifdef OBJECT_PRINT std::ostringstream os; value->Bool8x16Print(os); CHECK_EQ( "true, false, true, false, true, false, true, false, true, false, " "true, false, true, false, true, false", os.str()); #endif // OBJECT_PRINT } } TEST(Tagging) { CcTest::InitializeVM(); int request = 24; CHECK_EQ(request, static_cast(OBJECT_POINTER_ALIGN(request))); CHECK(Smi::FromInt(42)->IsSmi()); CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi()); CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi()); } TEST(GarbageCollection) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope sc(isolate); // Check GC. 
heap->CollectGarbage(NEW_SPACE); Handle global( CcTest::i_isolate()->context()->global_object()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle prop_namex = factory->InternalizeUtf8String("theSlotx"); Handle obj_name = factory->InternalizeUtf8String("theObject"); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); { HandleScope inner_scope(isolate); // Allocate a function and keep it in global object's property. Handle function = factory->NewFunction(name); JSReceiver::SetProperty(global, name, function, SLOPPY).Check(); // Allocate an object. Unrooted after leaving the scope. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(obj, prop_namex).ToHandleChecked()); } heap->CollectGarbage(NEW_SPACE); // Function should be alive. CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name)); // Check function is retained. Handle func_value = Object::GetProperty(global, name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); { HandleScope inner_scope(isolate); // Allocate another object, make it reachable from global. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); } // After gc, it should survive. 
heap->CollectGarbage(NEW_SPACE); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name)); Handle obj = Object::GetProperty(global, obj_name).ToHandleChecked(); CHECK(obj->IsJSObject()); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); } static void VerifyStringAllocation(Isolate* isolate, const char* string) { HandleScope scope(isolate); Handle s = isolate->factory()->NewStringFromUtf8( CStrVector(string)).ToHandleChecked(); CHECK_EQ(StrLength(string), s->length()); for (int index = 0; index < s->length(); index++) { CHECK_EQ(static_cast(string[index]), s->Get(index)); } } TEST(String) { CcTest::InitializeVM(); Isolate* isolate = reinterpret_cast(CcTest::isolate()); VerifyStringAllocation(isolate, "a"); VerifyStringAllocation(isolate, "ab"); VerifyStringAllocation(isolate, "abc"); VerifyStringAllocation(isolate, "abcd"); VerifyStringAllocation(isolate, "fiskerdrengen er paa havet"); } TEST(LocalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* name = "Kasper the spunky"; Handle string = factory->NewStringFromAsciiChecked(name); CHECK_EQ(StrLength(name), string->length()); } TEST(GlobalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); Handle h1; Handle h2; Handle h3; Handle h4; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); h3 = global_handles->Create(*i); h4 = global_handles->Create(*u); } // after gc, it should survive heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK((*h3)->IsString()); CHECK((*h4)->IsHeapNumber()); CHECK_EQ(*h3, *h1); 
// ---------------------------------------------------------------------------
// NOTE(review): this region was mangled by a text-extraction pass: each run of
// statements is collapsed onto one physical line and every template argument
// list (e.g. Handle<Object>, std::pair<Handle<Object>*, int>,
// reinterpret_cast<void*>) has been stripped, so this code does not compile as
// written.  Worse, mid-line "//" comments now comment out the remainder of
// each collapsed line.  Restore the upstream formatting and template arguments
// before building; the comments added here only describe what each collapsed
// line contains — no code tokens were changed.
// ---------------------------------------------------------------------------
// Line below: tail of a global-handle test (destroys handles h1..h4); the
// TestWeakGlobalHandleCallback helper (sets WeakPointerCleared when the weak
// callback fires with parameter id 1234, then resets the handle);
// TEST(WeakGlobalHandlesScavenge) — weak handles must survive a scavenge
// untouched; and the start of TEST(WeakGlobalHandlesMark).
GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h3.location()); CHECK_EQ(*h4, *h2); GlobalHandles::Destroy(h2.location()); GlobalHandles::Destroy(h4.location()); } static bool WeakPointerCleared = false; static void TestWeakGlobalHandleCallback( const v8::WeakCallbackInfo& data) { std::pair*, int>* p = reinterpret_cast*, int>*>( data.GetParameter()); if (p->second == 1234) WeakPointerCleared = true; p->first->Reset(); } TEST(WeakGlobalHandlesScavenge) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scavenge treats weak pointers as normal roots. heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK(!WeakPointerCleared); CHECK(!global_handles->IsNearDeath(h2.location())); CHECK(!global_handles->IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h2.location()); } TEST(WeakGlobalHandlesMark) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } // Make sure the objects are promoted. 
// Line below: continuation of TEST(WeakGlobalHandlesMark) — promotes both
// objects to old space, makes h2 weak, runs a full GC and expects the weak
// callback to have fired (WeakPointerCleared).  Then TEST(DeleteWeakGlobalHandle)
// begins: a scavenge alone must NOT trigger the weak callback.
heap->CollectGarbage(OLD_SPACE); heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2)); std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); CHECK(!GlobalHandles::IsNearDeath(h1.location())); CHECK(!GlobalHandles::IsNearDeath(h2.location())); // Incremental marking potentially marked handles before they turned weak. heap->CollectAllGarbage(); CHECK((*h1)->IsString()); CHECK(WeakPointerCleared); CHECK(!GlobalHandles::IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); } TEST(DeleteWeakGlobalHandle) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); h = global_handles->Create(*i); } std::pair*, int> handle_and_id(&h, 1234); GlobalHandles::MakeWeak(h.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scanvenge does not recognize weak reference. heap->CollectGarbage(NEW_SPACE); CHECK(!WeakPointerCleared); // Mark-compact treats weak reference properly. 
// Line below: end of TEST(DeleteWeakGlobalHandle) — mark-compact clears the
// weak reference.  TEST(DoNotPromoteWhiteObjectsOnScavenge): an unmarked
// (white) object stays in new space after a scavenge.
// TEST(PromoteGreyOrBlackObjectsOnScavenge): an object marked by incremental
// marking is promoted out of new space.  Then the start of TEST(BytecodeArray):
// allocates a 5-entry constant pool and a BytecodeArray from kRawBytes.
heap->CollectGarbage(OLD_SPACE); CHECK(WeakPointerCleared); } TEST(DoNotPromoteWhiteObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle white = factory->NewStringFromStaticChars("white"); CHECK(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*white)))); heap->CollectGarbage(NEW_SPACE); CHECK(heap->InNewSpace(*white)); } TEST(PromoteGreyOrBlackObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle marked = factory->NewStringFromStaticChars("marked"); IncrementalMarking* marking = heap->incremental_marking(); marking->Stop(); heap->StartIncrementalMarking(); while (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*marked)))) { marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD, IncrementalMarking::FORCE_MARKING, IncrementalMarking::DO_NOT_FORCE_COMPLETION); } heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*marked)); } TEST(BytecodeArray) { static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a}; static const int kRawBytesSize = sizeof(kRawBytes); static const int kFrameSize = 32; static const int kParameterCount = 2; i::FLAG_manual_evacuation_candidates_selection = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); heap::SimulateFullSpace(heap->old_space()); Handle constant_pool = factory->NewFixedArray(5, TENURED); for (int i = 0; i < 5; i++) { Handle number = factory->NewHeapNumber(i); constant_pool->set(i, *number); } // Allocate and initialize BytecodeArray Handle array = factory->NewBytecodeArray( kRawBytesSize, kRawBytes, kFrameSize, kParameterCount, constant_pool); CHECK(array->IsBytecodeArray()); CHECK_EQ(array->length(), 
// Line below: continuation of TEST(BytecodeArray) — verifies length, frame
// size, parameter count, constant pool identity, address layout, and the raw
// byte contents; then forces the constant pool's page to be an evacuation
// candidate and runs a full GC, re-checking the bytecode contents.
(int)sizeof(kRawBytes)); CHECK_EQ(array->frame_size(), kFrameSize); CHECK_EQ(array->parameter_count(), kParameterCount); CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_LE(array->address(), array->GetFirstBytecodeAddress()); CHECK_GE(array->address() + array->BytecodeArraySize(), array->GetFirstBytecodeAddress() + array->length()); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); CHECK_EQ(array->get(i), kRawBytes[i]); } FixedArray* old_constant_pool_address = *constant_pool; // Perform a full garbage collection and force the constant pool to be on an // evacuation candidate. Page* evac_page = Page::FromAddress(constant_pool->address()); evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); heap->CollectAllGarbage(); // BytecodeArray should survive. CHECK_EQ(array->length(), kRawBytesSize); CHECK_EQ(array->frame_size(), kFrameSize); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->get(i), kRawBytes[i]); CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); } // Constant pool should have been migrated. 
// Line below: end of TEST(BytecodeArray) — the constant pool pointer must have
// been updated to the evacuated copy (new address, same handle).  Then the
// not_so_random_string_table list of JS keywords/reserved words (0-terminated)
// and the start of CheckInternalizedStrings, which internalizes each entry.
CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_NE(array->constant_pool(), old_constant_pool_address); } static const char* not_so_random_string_table[] = { "abstract", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "var", "void", "volatile", "while", "with", 0 }; static void CheckInternalizedStrings(const char** strings) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); for (const char* string = *strings; *strings != 0; string = *strings++) { HandleScope scope(isolate); Handle a = isolate->factory()->InternalizeUtf8String(CStrVector(string)); // InternalizeUtf8String may return a failure if a GC is needed. 
// NOTE(review): extraction-mangled region — statements are collapsed onto one
// physical line each, template argument lists are stripped (Handle<String>,
// Handle<JSFunction>, etc.), and mid-line "//" comments swallow the rest of
// each line.  Does not compile as-is; restore upstream formatting before
// editing.  Only descriptive comments were added here; code tokens unchanged.
//
// Line below: tail of CheckInternalizedStrings — internalizing the same UTF-8
// string twice must yield the identical object; TEST(StringTable) runs that
// check twice over the keyword table; TEST(FunctionAllocation) allocates a
// function and an instance and sets a property on the instance.
CHECK(a->IsInternalizedString()); Handle b = factory->InternalizeUtf8String(string); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); b = isolate->factory()->InternalizeUtf8String(CStrVector(string)); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); } } TEST(StringTable) { CcTest::InitializeVM(); v8::HandleScope sc(CcTest::isolate()); CheckInternalizedStrings(not_so_random_string_table); CheckInternalizedStrings(not_so_random_string_table); } TEST(FunctionAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check that we can add properties to function objects. 
// Line below: end of TEST(FunctionAllocation) — function objects themselves
// accept properties.  TEST(ObjectProperties) begins: exercises
// HasOwnProperty/SetProperty/DeleteProperty in several add/delete orders on a
// fresh Object instance.
JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(function, prop_name).ToHandleChecked()); } TEST(ObjectProperties) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); // check for empty CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); // delete first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete first and then second CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); 
// Line below: continuation of TEST(ObjectProperties) — deletes in the reverse
// order, then checks that a plain string key and its internalized form address
// the same property (both directions).  TEST(JSObjectMaps): setting a property
// transitions the object away from the function's initial map.  Then the start
// of TEST(JSArray), which looks up the Array constructor.
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete second and then first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // check string and internalized string match const char* string1 = "fisk"; Handle s1 = factory->NewStringFromAsciiChecked(string1); JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check(); Handle s1_string = factory->InternalizeUtf8String(string1); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string)); // check internalized string and string match const char* string2 = "fugl"; Handle s2_string = factory->InternalizeUtf8String(string2); JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check(); Handle s2 = factory->NewStringFromAsciiChecked(string2); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2)); } TEST(JSObjectMaps) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); Handle initial_map(function->initial_map()); // Set a propery Handle twenty_three(Smi::FromInt(23), isolate); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check the map has changed CHECK(*initial_map != obj->map()); } TEST(JSArray) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("Array"); Handle fun_obj = 
// Line below: body of TEST(JSArray) — initializes an array, checks fast
// elements mode, writes element 0, then sets a length beyond Smi::kMaxValue
// and expects the array to fall back to dictionary elements.
Object::GetProperty( CcTest::i_isolate()->global_object(), name).ToHandleChecked(); Handle function = Handle::cast(fun_obj); // Allocate the object. Handle element; Handle object = factory->NewJSObject(function); Handle array = Handle::cast(object); // We just initialized the VM, no heap allocation failure yet. JSArray::Initialize(array, 0); // Set array length to 0. JSArray::SetLength(array, 0); CHECK_EQ(Smi::FromInt(0), array->length()); // Must be in fast mode. CHECK(array->HasFastSmiOrObjectElements()); // array[length] = name. JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(1), array->length()); element = i::Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); // Set array length with larger than smi value. JSArray::SetLength(array, static_cast(Smi::kMaxValue) + 1); uint32_t int_length = 0; CHECK(array->length()->ToArrayIndex(&int_length)); CHECK_EQ(static_cast(Smi::kMaxValue) + 1, int_length); CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. 
// Line below: end of TEST(JSArray) — writing at index int_length grows length
// by one; elements at both ends read back correctly.  TEST(JSObjectCopy)
// begins: builds an object with two named properties and two elements before
// cloning it.
JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check(); uint32_t new_int_length = 0; CHECK(array->length()->ToArrayIndex(&new_int_length)); CHECK_EQ(static_cast(int_length), new_int_length - 1); element = Object::GetElement(isolate, array, int_length).ToHandleChecked(); CHECK_EQ(*element, *name); element = Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); } TEST(JSObjectCopy) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check(); // Make the clone. 
// NOTE(review): extraction-mangled region — collapsed lines, stripped template
// argument lists, and mid-line "//" comments that swallow the rest of each
// line.  Does not compile as-is; only descriptive comments added here.
//
// Line below: body of TEST(JSObjectCopy) — verifies the clone is a distinct
// object whose properties/elements match, then flips values on the clone and
// cross-checks original vs. clone.  TEST(StringAllocation) begins: for lengths
// 0..99, builds a 3-byte-per-char UTF-8 string and a one-byte string.
Handle value1, value2; Handle clone = factory->CopyJSObject(obj); CHECK(!clone.is_identical_to(obj)); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); // Flip the values. JSReceiver::SetProperty(clone, first, two, SLOPPY).Check(); JSReceiver::SetProperty(clone, second, one, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check(); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); } TEST(StringAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 }; for (int length = 0; length < 100; length++) { v8::HandleScope scope(CcTest::isolate()); char* non_one_byte = NewArray(3 * length + 1); char* one_byte = NewArray(length + 1); non_one_byte[3 * length] = 0; 
// Line below: continuation of TEST(StringAllocation) — internalizes and
// allocates both string variants and checks their lengths; frees the buffers.
// Then the ObjectsFoundInHeap helper (counts how many of the given handles the
// HeapIterator visits) and the start of TEST(Iteration), which allocates
// arrays and strings in new and old space.
one_byte[length] = 0; for (int i = 0; i < length; i++) { one_byte[i] = 'a'; non_one_byte[3 * i] = chars[0]; non_one_byte[3 * i + 1] = chars[1]; non_one_byte[3 * i + 2] = chars[2]; } Handle non_one_byte_sym = factory->InternalizeUtf8String( Vector(non_one_byte, 3 * length)); CHECK_EQ(length, non_one_byte_sym->length()); Handle one_byte_sym = factory->InternalizeOneByteString(OneByteVector(one_byte, length)); CHECK_EQ(length, one_byte_sym->length()); Handle non_one_byte_str = factory->NewStringFromUtf8(Vector(non_one_byte, 3 * length)) .ToHandleChecked(); non_one_byte_str->Hash(); CHECK_EQ(length, non_one_byte_str->length()); Handle one_byte_str = factory->NewStringFromUtf8(Vector(one_byte, length)) .ToHandleChecked(); one_byte_str->Hash(); CHECK_EQ(length, one_byte_str->length()); DeleteArray(non_one_byte); DeleteArray(one_byte); } } static int ObjectsFoundInHeap(Heap* heap, Handle objs[], int size) { // Count the number of objects found in the heap. int found_count = 0; HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { for (int i = 0; i < size; i++) { if (*objs[i] == obj) { found_count++; } } } return found_count; } TEST(Iteration) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); // Array of objects to scan haep for. const int objs_count = 6; Handle objs[objs_count]; int next_objs_index = 0; // Allocate a JS array to OLD_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewJSArray(10); objs[next_objs_index++] = factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, TENURED); // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij"); objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij", TENURED); // Allocate a large string (for large object space). 
// Line below: end of TEST(Iteration) — adds a large-object-space string and a
// Map, then expects the heap iterator to find all six objects.  Then
// UNINITIALIZED_TEST(TestCodeFlushing) begins on a fresh isolate: compiles
// "foo", checks it is compiled, and that it survives two full GCs.
int large_size = Page::kMaxRegularHeapObjectSize + 1; char* str = new char[large_size]; for (int i = 0; i < large_size - 1; ++i) str[i] = 'a'; str[large_size - 1] = '\0'; objs[next_objs_index++] = factory->NewStringFromAsciiChecked(str, TENURED); delete[] str; // Add a Map object to look for. objs[next_objs_index++] = Handle(HeapObject::cast(*objs[0])->map()); CHECK_EQ(objs_count, next_objs_index); CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count)); } UNINITIALIZED_TEST(TestCodeFlushing) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); i::Isolate* i_isolate = reinterpret_cast(isolate); isolate->Enter(); Factory* factory = i_isolate->factory(); { v8::HandleScope scope(isolate); v8::Context::New(isolate)->Enter(); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(isolate); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(i_isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. i_isolate->heap()->CollectAllGarbage(); i_isolate->heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. 
// NOTE(review): extraction-mangled region — collapsed lines, stripped template
// argument lists (Handle<JSFunction>, static_cast<MarkingParity>, ...), and
// mid-line "//" comments that swallow the rest of each line.  Does not compile
// as-is; only descriptive comments added here.
//
// Line below: end of UNINITIALIZED_TEST(TestCodeFlushing) — after six full-GC
// aging rounds foo's code must be flushed (unless optimized), and calling foo
// recompiles it.  Then TEST(TestCodeFlushingPreAged) begins: with
// --optimize-for-size, once-run code is pre-aged and flushed after one extra
// GC; running it again resets its age.
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { i_isolate->heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } isolate->Exit(); isolate->Dispose(); } TEST(TestCodeFlushingPreAged) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // Compile foo, but don't run it. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code has been run so will survive at least one GC. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // The code was only run once, so it should be pre-aged and collected on the // next GC. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); // Execute the function again twice, and ensure it is reset to the young age. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();" "foo();"); } // The code will survive at least two GC now that it is young again. 
// Line below: end of TEST(TestCodeFlushingPreAged) — young-again code survives
// two GCs, is flushed after six aging GCs, and recompiles on call.  Then
// TEST(TestCodeFlushingIncremental) begins: same flushing scenario but driven
// by simulated incremental marking.
CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { CcTest::heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } TEST(TestCodeFlushingIncremental) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use incremental marking. 
// Line below: end of TEST(TestCodeFlushingIncremental) — flushes via
// incremental marking, recompiles foo, then forces optimization while foo is
// enqueued as a flushing candidate and checks the candidate queue stays sane.
// Then TEST(TestCodeFlushingIncrementalScavenge) begins with two functions
// foo/bar and an initial GC to enable code flushing.
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); CcTest::heap()->CollectAllGarbage(); } CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // This compile will compile the function again. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();"); } // Simulate several GCs that use incremental marking but make sure // the loop breaks once the function is enqueued as a candidate. for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); if (!function->next_function_link()->IsUndefined(CcTest::i_isolate())) break; CcTest::heap()->CollectAllGarbage(); } // Force optimization while incremental marking is active and while // the function is enqueued as a candidate. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled() || !function->IsOptimized()); CHECK(function->is_compiled() || !function->IsOptimized()); } TEST(TestCodeFlushingIncrementalScavenge) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "var foo = function() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo();" "var bar = function() {" " var x = 23;" "};" "bar();"; Handle foo_name = factory->InternalizeUtf8String("foo"); Handle bar_name = factory->InternalizeUtf8String("bar"); // Perfrom one initial GC to enable code flushing. CcTest::heap()->CollectAllGarbage(); // This compile will add the code to the compilation cache. 
// Line below: body of TEST(TestCodeFlushingIncrementalScavenge) — ages both
// functions, enqueues them as flushing candidates via simulated incremental
// marking, kills function2's handle, scavenges while marking is running, and
// checks the candidate queue after a final full GC.  Then the start of
// TEST(TestCodeFlushingIncrementalAbort).
{ v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check functions are compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); Handle func_value2 = Object::GetProperty(isolate->global_object(), bar_name).ToHandleChecked(); CHECK(func_value2->IsJSFunction()); Handle function2 = Handle::cast(func_value2); CHECK(function2->shared()->is_compiled()); // Clear references to functions so that one of them can die. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo = 0; bar = 0;"); } // Bump the code age so that flushing is triggered while the function // object is still located in new-space. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); function2->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the functions are enqueued as // code flushing candidates. Then kill one of the functions. Finally // perform a scavenge while incremental marking is still running. heap::SimulateIncrementalMarking(CcTest::heap()); *function2.location() = NULL; CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking"); // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); } TEST(TestCodeFlushingIncrementalAbort) { // If we do not flush code this test is invalid. 
// Line below: body of TEST(TestCodeFlushingIncrementalAbort) — ages foo,
// starts incremental marking, then enables the debugger and sets/clears a
// breakpoint so that marking aborts and code flushing is disabled, after which
// foo can be safely optimized.
if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. heap->CollectAllGarbage(); heap->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Bump the code age so that flushing is triggered. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the function is enqueued as // code flushing candidate. heap::SimulateIncrementalMarking(heap); // Enable the debugger and add a breakpoint while incremental marking // is running so that incremental marking aborts and code flushing is // disabled. int position = 0; Handle breakpoint_object(Smi::FromInt(0), isolate); EnableDebugger(CcTest::isolate()); isolate->debug()->SetBreakPoint(function, breakpoint_object, &position); isolate->debug()->ClearAllBreakPoints(); DisableDebugger(CcTest::isolate()); // Force optimization now that code flushing is disabled. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. 
// Line below: end of TEST(TestCodeFlushingIncrementalAbort).  Then
// TEST(TestUseOfIncrementalBarrierOnCompileLazy): compiles closures f and g,
// optimizes f during incremental marking, and checks that calling g installs
// the shared optimized code (exercising the incremental write barrier in the
// CompileLazy builtin).
heap->CollectAllGarbage(); CHECK(function->shared()->is_compiled() || !function->IsOptimized()); CHECK(function->is_compiled() || !function->IsOptimized()); } TEST(TestUseOfIncrementalBarrierOnCompileLazy) { // Turn off always_opt because it interferes with running the built-in for // the last call to g(). i::FLAG_always_opt = false; i::FLAG_allow_natives_syntax = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); v8::HandleScope scope(CcTest::isolate()); CompileRun( "function make_closure(x) {" " return function() { return x + 3 };" "}" "var f = make_closure(5); f();" "var g = make_closure(5);"); // Check f is compiled. Handle f_name = factory->InternalizeUtf8String("f"); Handle f_value = Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked(); Handle f_function = Handle::cast(f_value); CHECK(f_function->is_compiled()); // Check g is not compiled. Handle g_name = factory->InternalizeUtf8String("g"); Handle g_value = Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked(); Handle g_function = Handle::cast(g_value); CHECK(!g_function->is_compiled()); heap::SimulateIncrementalMarking(heap); CompileRun("%OptimizeFunctionOnNextCall(f); f();"); // g should now have available an optimized function, unmarked by gc. The // CompileLazy built-in will discover it and install it in the closure, and // the incremental write barrier should be used. CompileRun("g();"); CHECK(g_function->is_compiled()); } TEST(CompilationCacheCachingBehavior) { // If we do not flush code, or have the compilation cache turned off, this // test is invalid. 
if (!FLAG_flush_code || !FLAG_compilation_cache) { return; } CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); CompilationCache* compilation_cache = isolate->compilation_cache(); LanguageMode language_mode = construct_language_mode(FLAG_use_strict); v8::HandleScope scope(CcTest::isolate()); const char* raw_source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle source = factory->InternalizeUtf8String(raw_source); Handle native_context = isolate->native_context(); { v8::HandleScope scope(CcTest::isolate()); CompileRun(raw_source); } // The script should be in the cache now. MaybeHandle info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); // Check that the code cache entry survives at least on GC. // (Unless --optimize-for-size, in which case it might get collected // immediately.) if (!FLAG_optimize_for_size) { heap->CollectAllGarbage(); info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); } // Progress code age until it's old and ready for GC. while (!info.ToHandleChecked()->code()->IsOld()) { // To guarantee progress, we have to MakeOlder with different parities. // We can't just use NO_MARKING_PARITY, since e.g. kExecutedOnceCodeAge is // always NO_MARKING_PARITY and the code age only progresses if the parity // is different. info.ToHandleChecked()->code()->MakeOlder(ODD_MARKING_PARITY); info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY); } heap->CollectAllGarbage(); // Ensure code aging cleared the entry from the cache. 
info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(info.is_null()); } static void OptimizeEmptyFunction(const char* name) { HandleScope scope(CcTest::i_isolate()); EmbeddedVector source; SNPrintF(source, "function %s() { return 0; }" "%s(); %s();" "%%OptimizeFunctionOnNextCall(%s);" "%s();", name, name, name, name, name); CompileRun(source.start()); } // Count the number of native contexts in the weak list of native contexts. int CountNativeContexts() { int count = 0; Object* object = CcTest::heap()->native_contexts_list(); while (!object->IsUndefined(CcTest::i_isolate())) { count++; object = Context::cast(object)->next_context_link(); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context. static int CountOptimizedUserFunctions(v8::Local context) { int count = 0; Handle icontext = v8::Utils::OpenHandle(*context); Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST); while (object->IsJSFunction() && !JSFunction::cast(object)->shared()->IsBuiltin()) { count++; object = JSFunction::cast(object)->next_function_link(); } return count; } TEST(TestInternalWeakLists) { FLAG_always_opt = false; FLAG_allow_natives_syntax = true; v8::V8::Initialize(); // Some flags turn Scavenge collections into Mark-sweep collections // and hence are incompatible with this test case. if (FLAG_gc_global || FLAG_stress_compaction) return; FLAG_retain_maps_for_n_gc = 0; static const int kNumTestContexts = 10; Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); HandleScope scope(isolate); v8::Local ctx[kNumTestContexts]; if (!isolate->use_crankshaft()) return; CHECK_EQ(0, CountNativeContexts()); // Create a number of global contests which gets linked together. 
for (int i = 0; i < kNumTestContexts; i++) { ctx[i] = v8::Context::New(CcTest::isolate()); // Collect garbage that might have been created by one of the // installed extensions. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(i + 1, CountNativeContexts()); ctx[i]->Enter(); // Create a handle scope so no function objects get stuck in the outer // handle scope. HandleScope scope(isolate); CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f1"); CHECK_EQ(1, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f2"); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f3"); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f4"); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f5"); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); // Remove function f1, and CompileRun("f1=null"); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); } // Mark compact handles the weak references. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); // Get rid of f3 and f5 in the same way. CompileRun("f3=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); CompileRun("f5=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); ctx[i]->Exit(); } // Force compilation cache cleanup. CcTest::heap()->NotifyContextDisposed(true); CcTest::heap()->CollectAllGarbage(); // Dispose the native contexts one by one. 
for (int i = 0; i < kNumTestContexts; i++) { // TODO(dcarney): is there a better way to do this? i::Object** unsafe = reinterpret_cast(*ctx[i]); *unsafe = CcTest::heap()->undefined_value(); ctx[i].Clear(); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(i::NEW_SPACE); CHECK_EQ(kNumTestContexts - i, CountNativeContexts()); } // Mark compact handles the weak references. CcTest::heap()->CollectAllGarbage(); CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts()); } CHECK_EQ(0, CountNativeContexts()); } // Count the number of native contexts in the weak list of native contexts // causing a GC after the specified number of elements. static int CountNativeContextsWithGC(Isolate* isolate, int n) { Heap* heap = isolate->heap(); int count = 0; Handle object(heap->native_contexts_list(), isolate); while (!object->IsUndefined(isolate)) { count++; if (count == n) heap->CollectAllGarbage(); object = Handle(Context::cast(*object)->next_context_link(), isolate); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context causing a GC after the // specified number of elements. 
// Walks the per-context list of optimized user functions, forcing a full GC
// after visiting n elements; returns the number of (non-builtin) functions.
// NOTE(review): template argument lists (v8::Local<...>, Handle<...>) appear
// stripped by a text-extraction step — restore against upstream.
static int CountOptimizedUserFunctionsWithGC(v8::Local context, int n) {
  int count = 0;
  Handle icontext = v8::Utils::OpenHandle(*context);
  Isolate* isolate = icontext->GetIsolate();
  Handle object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST), isolate);
  // The list is terminated by a non-JSFunction (or a builtin) sentinel.
  while (object->IsJSFunction() &&
         !Handle::cast(object)->shared()->IsBuiltin()) {
    count++;
    // GC mid-iteration to verify the weak list stays traversable.
    if (count == n) isolate->heap()->CollectAllGarbage();
    object = Handle(
        Object::cast(JSFunction::cast(*object)->next_function_link()),
        isolate);
  }
  return count;
}

// Verifies that the weak lists of native contexts and of optimized functions
// can be traversed while GCs occur during the traversal.
TEST(TestInternalWeakListsTraverseWithGC) {
  FLAG_always_opt = false;
  FLAG_allow_natives_syntax = true;
  v8::V8::Initialize();

  static const int kNumTestContexts = 10;

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  v8::Local ctx[kNumTestContexts];
  // Optimized-function lists only exist with crankshaft available.
  if (!isolate->use_crankshaft()) return;

  CHECK_EQ(0, CountNativeContexts());

  // Create a number of contexts and check the length of the weak list both
  // with and without GCs while iterating the list.
  for (int i = 0; i < kNumTestContexts; i++) {
    ctx[i] = v8::Context::New(CcTest::isolate());
    CHECK_EQ(i + 1, CountNativeContexts());
    CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
  }

  ctx[0]->Enter();

  // Compile a number of functions the length of the weak list of optimized
  // functions both with and without GCs while iterating the list.
// Continuation of TEST(TestInternalWeakListsTraverseWithGC): after each
// function is optimized, the plain count and the count-with-GC must agree.
CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0]));
OptimizeEmptyFunction("f1");
CHECK_EQ(1, CountOptimizedUserFunctions(ctx[0]));
CHECK_EQ(1, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
OptimizeEmptyFunction("f2");
CHECK_EQ(2, CountOptimizedUserFunctions(ctx[0]));
CHECK_EQ(2, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
OptimizeEmptyFunction("f3");
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[0]));
CHECK_EQ(3, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
OptimizeEmptyFunction("f4");
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[0]));
CHECK_EQ(4, CountOptimizedUserFunctionsWithGC(ctx[0], 2));
OptimizeEmptyFunction("f5");
CHECK_EQ(5, CountOptimizedUserFunctions(ctx[0]));
CHECK_EQ(5, CountOptimizedUserFunctionsWithGC(ctx[0], 4));

ctx[0]->Exit();
}

// Checks that a large (non-optimizable) regexp compiles to much more code
// than a half-size (optimizable) one.
TEST(TestSizeOfRegExpCode) {
  if (!FLAG_regexp_optimization) return;

  v8::V8::Initialize();

  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  LocalContext context;

  // Adjust source below and this check to match
  // RegExpImple::kRegExpTooLargeToOptimize.
  CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB);

  // Compile a regexp that is much larger if we are using regexp optimizations.
  CompileRun(
      "var reg_exp_source = '(?:a|bc|def|ghij|klmno|pqrstu)';"
      "var half_size_reg_exp;"
      "while (reg_exp_source.length < 20 * 1024) {"
      " half_size_reg_exp = reg_exp_source;"
      " reg_exp_source = reg_exp_source + reg_exp_source;"
      "}"
      // Flatten string.
      "reg_exp_source.match(/f/);");

  // Get initial heap size after several full GCs, which will stabilize
  // the heap size and return with sweeping finished completely.
// Continuation of TEST(TestSizeOfRegExpCode): measure code-size deltas.
// NOTE(review): static_cast target types appear stripped by text extraction —
// restore (static_cast<int>) against upstream before compiling.
CcTest::heap()->CollectAllGarbage();
CcTest::heap()->CollectAllGarbage();
CcTest::heap()->CollectAllGarbage();
CcTest::heap()->CollectAllGarbage();
CcTest::heap()->CollectAllGarbage();
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
  collector->EnsureSweepingCompleted();
}
int initial_size = static_cast(CcTest::heap()->SizeOfObjects());

CompileRun("'foo'.match(reg_exp_source);");
CcTest::heap()->CollectAllGarbage();
int size_with_regexp = static_cast(CcTest::heap()->SizeOfObjects());

CompileRun("'foo'.match(half_size_reg_exp);");
CcTest::heap()->CollectAllGarbage();
int size_with_optimized_regexp =
    static_cast(CcTest::heap()->SizeOfObjects());

int size_of_regexp_code = size_with_regexp - initial_size;

// On some platforms the debug-code flag causes huge amounts of regexp code
// to be emitted, breaking this test.
if (!FLAG_debug_code) {
  CHECK_LE(size_of_regexp_code, 1 * MB);
}

// Small regexp is half the size, but compiles to more than twice the code
// due to the optimization steps.
CHECK_GE(size_with_optimized_regexp,
         size_with_regexp + size_of_regexp_code * 2);
}

// Verifies that Heap::SizeOfObjects tracks allocations exactly and returns
// to the initial size after a full GC, even with concurrent sweeping.
HEAP_TEST(TestSizeOfObjects) {
  v8::V8::Initialize();

  // Get initial heap size after several full GCs, which will stabilize
  // the heap size and return with sweeping finished completely.
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  CcTest::heap()->CollectAllGarbage();
  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  int initial_size = static_cast(CcTest::heap()->SizeOfObjects());

  {
    // Allocate objects on several different old-space pages so that
    // concurrent sweeper threads will be busy sweeping the old space on
    // subsequent GC runs.
// Continuation of HEAP_TEST(TestSizeOfObjects): allocate and check growth.
// NOTE(review): static_cast target types appear stripped by text extraction.
AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
  // Each tenured FixedArray must grow SizeOfObjects by exactly filler_size.
  CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked();
  CHECK_EQ(initial_size + i * filler_size,
           static_cast(CcTest::heap()->SizeOfObjects()));
}
}

// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
CcTest::heap()->CollectAllGarbage();

// Normally sweeping would not be complete here, but no guarantees.
CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects()));

// Waiting for sweeper threads should not change heap size.
if (collector->sweeping_in_progress()) {
  collector->EnsureSweepingCompleted();
}
CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects()));
}

// Checks Heap::GetMaximumFillToAlign / GetFillToAlign for every supported
// allocation alignment.
TEST(TestAlignmentCalculations) {
  // Maximum fill amounts are consistent.
  int maximum_double_misalignment = kDoubleSize - kPointerSize;
  int maximum_simd128_misalignment = kSimd128Size - kPointerSize;
  int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
  CHECK_EQ(0, max_word_fill);
  int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
  CHECK_EQ(maximum_double_misalignment, max_double_fill);
  int max_double_unaligned_fill =
      Heap::GetMaximumFillToAlign(kDoubleUnaligned);
  CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill);
  int max_simd128_unaligned_fill =
      Heap::GetMaximumFillToAlign(kSimd128Unaligned);
  CHECK_EQ(maximum_simd128_misalignment, max_simd128_unaligned_fill);

  Address base = static_cast(NULL);
  int fill = 0;

  // Word alignment never requires fill.
  fill = Heap::GetFillToAlign(base, kWordAligned);
  CHECK_EQ(0, fill);
  fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned);
  CHECK_EQ(0, fill);

  // No fill is required when address is double aligned.
  fill = Heap::GetFillToAlign(base, kDoubleAligned);
  CHECK_EQ(0, fill);
  // Fill is required if address is not double aligned.
// Continuation of TEST(TestAlignmentCalculations): remaining fill cases.
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned);
CHECK_EQ(maximum_double_misalignment, fill);
// kDoubleUnaligned has the opposite fill amounts.
fill = Heap::GetFillToAlign(base, kDoubleUnaligned);
CHECK_EQ(maximum_double_misalignment, fill);
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned);
CHECK_EQ(0, fill);
// 128 bit SIMD types have 2 or 4 possible alignments, depending on platform.
fill = Heap::GetFillToAlign(base, kSimd128Unaligned);
CHECK_EQ((3 * kPointerSize) & kSimd128AlignmentMask, fill);
fill = Heap::GetFillToAlign(base + kPointerSize, kSimd128Unaligned);
CHECK_EQ((2 * kPointerSize) & kSimd128AlignmentMask, fill);
fill = Heap::GetFillToAlign(base + 2 * kPointerSize, kSimd128Unaligned);
CHECK_EQ(kPointerSize, fill);
fill = Heap::GetFillToAlign(base + 3 * kPointerSize, kSimd128Unaligned);
CHECK_EQ(0, fill);
}

// Allocates an aligned chunk in new space and immediately fills it with a
// filler object so the heap stays iterable.
static HeapObject* NewSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
  Heap* heap = CcTest::heap();
  AllocationResult allocation =
      heap->new_space()->AllocateRawAligned(size, alignment);
  HeapObject* obj = NULL;
  allocation.To(&obj);
  heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}

// Get new space allocation into the desired alignment.
static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
  Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
  int fill = Heap::GetFillToAlign(*top_addr, alignment);
  if (fill) {
    // Bump the top pointer by fill + offset via a word-aligned allocation.
    NewSpaceAllocateAligned(fill + offset, kWordAligned);
  }
  return *top_addr;
}

// Checks that aligned new-space allocations insert fillers of the expected
// size before the object, for double and SIMD alignments.
TEST(TestAlignedAllocation) {
  // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
  const intptr_t double_misalignment = kDoubleSize - kPointerSize;
  Address* top_addr = CcTest::heap()->new_space()->allocation_top_address();
  Address start;
  HeapObject* obj;
  HeapObject* filler;
  if (double_misalignment) {
    // Allocate a pointer sized object that must be double aligned at an
    // aligned address.
// Continuation of TEST(TestAlignedAllocation): double-aligned cases, then the
// first two SIMD cases.
start = AlignNewSpace(kDoubleAligned, 0);
obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
// There is no filler.
CHECK_EQ(kPointerSize, *top_addr - start);

// Allocate a second pointer sized object that must be double aligned at an
// unaligned address.
start = AlignNewSpace(kDoubleAligned, kPointerSize);
obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
CHECK(obj != filler && filler->IsFiller() &&
      filler->Size() == kPointerSize);
CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);

// Similarly for kDoubleUnaligned.
start = AlignNewSpace(kDoubleUnaligned, 0);
obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
CHECK_EQ(kPointerSize, *top_addr - start);
start = AlignNewSpace(kDoubleUnaligned, kPointerSize);
obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
CHECK(obj != filler && filler->IsFiller() &&
      filler->Size() == kPointerSize);
CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start);
}

// Now test SIMD alignment. There are 2 or 4 possible alignments, depending
// on platform.
start = AlignNewSpace(kSimd128Unaligned, 0);
obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is no filler.
CHECK_EQ(kPointerSize, *top_addr - start);
start = AlignNewSpace(kSimd128Unaligned, kPointerSize);
obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is a filler object before the object.
// Continuation of TEST(TestAlignedAllocation): remaining SIMD cases.
filler = HeapObject::FromAddress(start);
CHECK(obj != filler && filler->IsFiller() &&
      filler->Size() == kSimd128Size - kPointerSize);
CHECK_EQ(kPointerSize + kSimd128Size - kPointerSize, *top_addr - start);

if (double_misalignment) {
  // Test the 2 other alignments possible on 32 bit platforms.
  start = AlignNewSpace(kSimd128Unaligned, 2 * kPointerSize);
  obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is a filler object before the object.
  filler = HeapObject::FromAddress(start);
  CHECK(obj != filler && filler->IsFiller() &&
        filler->Size() == 2 * kPointerSize);
  CHECK_EQ(kPointerSize + 2 * kPointerSize, *top_addr - start);
  start = AlignNewSpace(kSimd128Unaligned, 3 * kPointerSize);
  obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There is a filler object before the object.
  filler = HeapObject::FromAddress(start);
  CHECK(obj != filler && filler->IsFiller() &&
        filler->Size() == kPointerSize);
  CHECK_EQ(kPointerSize + kPointerSize, *top_addr - start);
}
}

// Allocates an aligned chunk in old space and immediately fills it with a
// filler object so the heap stays iterable.
static HeapObject* OldSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
  Heap* heap = CcTest::heap();
  AllocationResult allocation =
      heap->old_space()->AllocateRawAligned(size, alignment);
  HeapObject* obj = NULL;
  allocation.To(&obj);
  heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}

// Get old space allocation into the desired alignment.
static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
  Address* top_addr = CcTest::heap()->old_space()->allocation_top_address();
  int fill = Heap::GetFillToAlign(*top_addr, alignment);
  int allocation = fill + offset;
  if (allocation) {
    OldSpaceAllocateAligned(allocation, kWordAligned);
  }
  Address top = *top_addr;
  // Now force the remaining allocation onto the free list.
CcTest::heap()->old_space()->EmptyAllocationInfo(); return top; } // Test the case where allocation must be done from the free list, so filler // may precede or follow the object. TEST(TestAlignedOverAllocation) { // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. const intptr_t double_misalignment = kDoubleSize - kPointerSize; Address start; HeapObject* obj; HeapObject* filler1; HeapObject* filler2; if (double_misalignment) { start = AlignOldSpace(kDoubleAligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. start = AlignOldSpace(kDoubleAligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1); CHECK(filler1->IsFiller()); CHECK(filler1->Size() == kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Similarly for kDoubleUnaligned. start = AlignOldSpace(kDoubleUnaligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. 
// Continuation of TEST(TestAlignedOverAllocation): unaligned-double case,
// then the SIMD alignment cases.
start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
filler1 = HeapObject::FromAddress(start);
CHECK(obj != filler1 && filler1->IsFiller() &&
      filler1->Size() == kPointerSize);
}

// Now test SIMD alignment. There are 2 or 4 possible alignments, depending
// on platform.
start = AlignOldSpace(kSimd128Unaligned, 0);
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is a filler object after the object.
filler1 = HeapObject::FromAddress(start + kPointerSize);
CHECK(obj != filler1 && filler1->IsFiller() &&
      filler1->Size() == kSimd128Size - kPointerSize);
start = AlignOldSpace(kSimd128Unaligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is a filler object before the object.
filler1 = HeapObject::FromAddress(start);
CHECK(obj != filler1 && filler1->IsFiller() &&
      filler1->Size() == kSimd128Size - kPointerSize);

if (double_misalignment) {
  // Test the 2 other alignments possible on 32 bit platforms.
  start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize);
  obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
  CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
  // There are filler objects before and after the object.
// Continuation of TEST(TestAlignedOverAllocation): fillers on both sides.
filler1 = HeapObject::FromAddress(start);
CHECK(obj != filler1 && filler1->IsFiller() &&
      filler1->Size() == 2 * kPointerSize);
filler2 = HeapObject::FromAddress(start + 3 * kPointerSize);
CHECK(obj != filler2 && filler2->IsFiller() &&
      filler2->Size() == kPointerSize);
start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There are filler objects before and after the object.
filler1 = HeapObject::FromAddress(start);
CHECK(obj != filler1 && filler1->IsFiller() &&
      filler1->Size() == kPointerSize);
filler2 = HeapObject::FromAddress(start + 2 * kPointerSize);
CHECK(obj != filler2 && filler2->IsFiller() &&
      filler2->Size() == 2 * kPointerSize);
}
}

// Compares Heap::SizeOfObjects against a HeapIterator walk; the two must
// agree to within 5% of the larger value.
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
  CcTest::InitializeVM();
  HeapIterator iterator(CcTest::heap());
  intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
  intptr_t size_of_objects_2 = 0;
  for (HeapObject* obj = iterator.next(); obj != NULL;
       obj = iterator.next()) {
    if (!obj->IsFreeSpace()) {
      size_of_objects_2 += obj->Size();
    }
  }
  // Delta must be within 5% of the larger result.
  // TODO(gc): Tighten this up by distinguishing between byte
  // arrays that are real and those that merely mark free space
  // on the heap.
  if (size_of_objects_1 > size_of_objects_2) {
    intptr_t delta = size_of_objects_1 - size_of_objects_2;
    PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
           ", "
           "Iterator: %" V8PRIdPTR
           ", "
           "delta: %" V8PRIdPTR "\n",
           size_of_objects_1, size_of_objects_2, delta);
    CHECK_GT(size_of_objects_1 / 20, delta);
  } else {
    intptr_t delta = size_of_objects_2 - size_of_objects_1;
    PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
           ", "
           "Iterator: %" V8PRIdPTR
           ", "
           "delta: %" V8PRIdPTR "\n",
           size_of_objects_1, size_of_objects_2, delta);
    CHECK_GT(size_of_objects_2 / 20, delta);
  }
}

static void FillUpNewSpace(NewSpace* new_space) {
  // Fill up new space to the point that it is completely full.
// Make sure that the scavenger does not undo the filling.
Heap* heap = new_space->heap();
Isolate* isolate = heap->isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
// Leave one array's worth of slack so the allocation below cannot fail.
intptr_t available = new_space->Capacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
  CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
}
}

// Checks that NewSpace::Grow doubles capacity, Shrink halves it only once
// the space is (nearly) empty, and filling prevents shrinking.
TEST(GrowAndShrinkNewSpace) {
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  NewSpace* new_space = heap->new_space();

  // Skip when the semispace cannot actually grow.
  if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
    return;
  }

  // Explicitly growing should double the space capacity.
  intptr_t old_capacity, new_capacity;
  old_capacity = new_space->TotalCapacity();
  new_space->Grow();
  new_capacity = new_space->TotalCapacity();
  CHECK(2 * old_capacity == new_capacity);

  old_capacity = new_space->TotalCapacity();
  FillUpNewSpace(new_space);
  new_capacity = new_space->TotalCapacity();
  CHECK(old_capacity == new_capacity);

  // Explicitly shrinking should not affect space capacity.
  old_capacity = new_space->TotalCapacity();
  new_space->Shrink();
  new_capacity = new_space->TotalCapacity();
  CHECK(old_capacity == new_capacity);

  // Let the scavenger empty the new space.
  heap->CollectGarbage(NEW_SPACE);
  CHECK_LE(new_space->Size(), old_capacity);

  // Explicitly shrinking should halve the space capacity.
  old_capacity = new_space->TotalCapacity();
  new_space->Shrink();
  new_capacity = new_space->TotalCapacity();
  CHECK(old_capacity == 2 * new_capacity);

  // Consecutive shrinking should not affect space capacity.
// Continuation of TEST(GrowAndShrinkNewSpace): shrinking below the minimum
// capacity is a no-op.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_space->Shrink();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK(old_capacity == new_capacity);
}

// Checks that CollectAllAvailableGarbage shrinks a previously grown new
// space back to its original capacity.
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
    return;
  }

  v8::HandleScope scope(CcTest::isolate());
  NewSpace* new_space = heap->new_space();
  intptr_t old_capacity, new_capacity;
  old_capacity = new_space->TotalCapacity();
  new_space->Grow();
  new_capacity = new_space->TotalCapacity();
  CHECK(2 * old_capacity == new_capacity);
  FillUpNewSpace(new_space);
  heap->CollectAllAvailableGarbage();
  new_capacity = new_space->TotalCapacity();
  CHECK(old_capacity == new_capacity);
}

// Counts JSGlobalObjects currently on the heap via a full heap walk.
static int NumberOfGlobalObjects() {
  int count = 0;
  HeapIterator iterator(CcTest::heap());
  for (HeapObject* obj = iterator.next(); obj != NULL;
       obj = iterator.next()) {
    if (obj->IsJSGlobalObject()) count++;
  }
  return count;
}

// Test that we don't embed maps from foreign contexts into
// optimized code.
// Optimizing `f` in ctx2 against an object from ctx1 must not keep ctx1's
// native context alive through an embedded map.
// NOTE(review): v8::Persistent/v8::Local template arguments appear stripped
// by a text-extraction step — restore against upstream before compiling.
TEST(LeakNativeContextViaMap) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent ctx1p;
  v8::Persistent ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = {x: 42}");
    v8::Local ctx1 = v8::Local::New(isolate, ctx1p);
    v8::Local ctx2 = v8::Local::New(isolate, ctx2p);
    v8::Local v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local res = CompileRun(
        "function f() { return o.x; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Sever the cross-context reference before disposing ctx1.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    v8::Local::New(isolate, ctx1)->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  // ctx1's global must now be collectable; only ctx2's remains.
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}

// Test that we don't embed functions from foreign contexts into
// optimized code.
// Optimizing `f` in ctx2 against a function from ctx1 must not keep ctx1's
// native context alive through an embedded function reference.
// NOTE(review): v8::Persistent/v8::Local template arguments appear stripped
// by a text-extraction step — restore against upstream before compiling.
TEST(LeakNativeContextViaFunction) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent ctx1p;
  v8::Persistent ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = function() { return 42; }");
    v8::Local ctx1 = v8::Local::New(isolate, ctx1p);
    v8::Local ctx2 = v8::Local::New(isolate, ctx2p);
    v8::Local v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local res = CompileRun(
        "function f(x) { return x(); }"
        "for (var i = 0; i < 10; ++i) f(o);"
        "%OptimizeFunctionOnNextCall(f);"
        "f(o);");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Sever the cross-context reference before disposing ctx1.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}

// Same leak check, but the foreign object is accessed with a keyed load.
TEST(LeakNativeContextViaMapKeyed) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent ctx1p;
  v8::Persistent ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = [42, 43]");
    v8::Local ctx1 = v8::Local::New(isolate, ctx1p);
    v8::Local ctx2 = v8::Local::New(isolate, ctx2p);
    v8::Local v =
// Continuation of TEST(LeakNativeContextViaMapKeyed), then the (truncated)
// start of TEST(LeakNativeContextViaMapProto).
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local res = CompileRun(
        "function f() { return o[0]; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Sever the cross-context reference before disposing ctx1.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}

// Same leak check via the prototype chain.
// NOTE(review): this definition is truncated here — the file content breaks
// off mid-declaration and restarts with unrelated earlier content.
TEST(LeakNativeContextViaMapProto) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent ctx1p;
  v8::Persistent
// NOTE(review): the leading "());" is a dangling fragment — the surrounding
// file content is discontinuous here (this region duplicates an earlier
// CheckFindCodeObject tail); reconcile against upstream.
());
// FindCodeObject on an address inside a different Code object must not
// return the original code object.
HeapObject* obj_copy = HeapObject::cast(*copy);
Object* not_right =
    isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2);
CHECK(not_right != *code);
}

// A handle to a null pointer is still a usable (non-empty) handle.
TEST(HandleNull) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope outer_scope(isolate);
  LocalContext context;
  Handle n(static_cast(nullptr), isolate);
  CHECK(!n.is_null());
}

// Smoke-tests basic heap object creation: numbers, strings, oddballs.
// NOTE(review): template argument lists (Handle<...>, static_cast<...>)
// appear stripped by a text-extraction step — restore against upstream.
TEST(HeapObjects) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();

  HandleScope sc(isolate);
  Handle value = factory->NewNumber(1.000123);
  CHECK(value->IsHeapNumber());
  CHECK(value->IsNumber());
  CHECK_EQ(1.000123, value->Number());

  value = factory->NewNumber(1.0);
  CHECK(value->IsSmi());
  CHECK(value->IsNumber());
  CHECK_EQ(1.0, value->Number());

  value = factory->NewNumberFromInt(1024);
  CHECK(value->IsSmi());
  CHECK(value->IsNumber());
  CHECK_EQ(1024.0, value->Number());

  value = factory->NewNumberFromInt(Smi::kMinValue);
  CHECK(value->IsSmi());
  CHECK(value->IsNumber());
  CHECK_EQ(Smi::kMinValue, Handle::cast(value)->value());

  value = factory->NewNumberFromInt(Smi::kMaxValue);
  CHECK(value->IsSmi());
  CHECK(value->IsNumber());
  CHECK_EQ(Smi::kMaxValue, Handle::cast(value)->value());

#if !defined(V8_TARGET_ARCH_64_BIT)
  // TODO(lrn): We need a NumberFromIntptr function in order to test this.
// Continuation of TEST(HeapObjects): values outside Smi range must be
// allocated as HeapNumbers.
value = factory->NewNumberFromInt(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast(Smi::kMinValue - 1), value->Number());
#endif

value = factory->NewNumberFromUint(static_cast(Smi::kMaxValue) + 1);
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast(static_cast(Smi::kMaxValue) + 1),
         value->Number());

value = factory->NewNumberFromUint(static_cast(1) << 31);
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast(static_cast(1) << 31), value->Number());

// nan oddball checks
CHECK(factory->nan_value()->IsNumber());
CHECK(std::isnan(factory->nan_value()->Number()));

Handle s = factory->NewStringFromStaticChars("fisk hest ");
CHECK(s->IsString());
CHECK_EQ(10, s->length());

Handle object_string = Handle::cast(factory->Object_string());
Handle global(
    CcTest::i_isolate()->context()->global_object());
CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string));

// Check ToString for oddballs
CheckOddball(isolate, heap->true_value(), "true");
CheckOddball(isolate, heap->false_value(), "false");
CheckOddball(isolate, heap->null_value(), "null");
CheckOddball(isolate, heap->undefined_value(), "undefined");

// Check ToString for Smis
CheckSmi(isolate, 0, "0");
CheckSmi(isolate, 42, "42");
CheckSmi(isolate, -42, "-42");

// Check ToString for Numbers
CheckNumber(isolate, 1.1, "1.1");

CheckFindCodeObject(isolate);
}

// NOTE(review): the template parameter list appears stripped by text
// extraction — upstream declares <typename T, typename LANE_TYPE, int LANES>.
template
static void CheckSimdValue(T* value, LANE_TYPE lane_values[LANES],
                           LANE_TYPE other_value) {
  // Check against lane_values, and check that all lanes can be set to
  // other_value without disturbing the other lanes.
// Body of CheckSimdValue: verify each lane, then mutate each lane in turn
// and confirm only that lane changed before restoring it.
for (int i = 0; i < LANES; i++) {
  CHECK_EQ(lane_values[i], value->get_lane(i));
}
for (int i = 0; i < LANES; i++) {
  value->set_lane(i, other_value);  // change the value
  for (int j = 0; j < LANES; j++) {
    if (i != j)
      CHECK_EQ(lane_values[j], value->get_lane(j));
    else
      CHECK_EQ(other_value, value->get_lane(j));
  }
  value->set_lane(i, lane_values[i]);  // restore the lane
}
CHECK(value->BooleanValue());  // SIMD values are 'true'.
}

// Exercises creation, lane access and printing of every SIMD value type.
// NOTE(review): template argument lists (std::numeric_limits<float>,
// Handle<...>, CheckSimdValue<...>) appear stripped by text extraction.
TEST(SimdObjects) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Factory* factory = isolate->factory();

  HandleScope sc(isolate);

  // Float32x4
  {
    float lanes[4] = {1, 2, 3, 4};
    float quiet_NaN = std::numeric_limits::quiet_NaN();
    float signaling_NaN = std::numeric_limits::signaling_NaN();

    Handle value = factory->NewFloat32x4(lanes);
    CHECK(value->IsFloat32x4());
    CheckSimdValue(*value, lanes, 3.14f);

    // Check special lane values.
    value->set_lane(1, -0.0);
    CHECK_EQ(-0.0f, value->get_lane(1));
    CHECK(std::signbit(value->get_lane(1)));  // Sign bit should be preserved.
    value->set_lane(2, quiet_NaN);
    CHECK(std::isnan(value->get_lane(2)));
    value->set_lane(3, signaling_NaN);
    CHECK(std::isnan(value->get_lane(3)));

#ifdef OBJECT_PRINT
    // Check value printing.
    {
      value = factory->NewFloat32x4(lanes);
      std::ostringstream os;
      value->Float32x4Print(os);
      CHECK_EQ("1, 2, 3, 4", os.str());
    }
    {
      float special_lanes[4] = {0, -0.0, quiet_NaN, signaling_NaN};
      value = factory->NewFloat32x4(special_lanes);
      std::ostringstream os;
      value->Float32x4Print(os);
      // Value printing doesn't preserve signed zeroes.
// Continuation of TEST(SimdObjects): integer and boolean SIMD types.
      CHECK_EQ("0, 0, NaN, NaN", os.str());
    }
#endif  // OBJECT_PRINT
  }
  // Int32x4
  {
    int32_t lanes[4] = {1, 2, 3, 4};

    Handle value = factory->NewInt32x4(lanes);
    CHECK(value->IsInt32x4());
    CheckSimdValue(*value, lanes, 3);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Int32x4Print(os);
    CHECK_EQ("1, 2, 3, 4", os.str());
#endif  // OBJECT_PRINT
  }
  // Uint32x4
  {
    uint32_t lanes[4] = {1, 2, 3, 4};

    Handle value = factory->NewUint32x4(lanes);
    CHECK(value->IsUint32x4());
    CheckSimdValue(*value, lanes, 3);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Uint32x4Print(os);
    CHECK_EQ("1, 2, 3, 4", os.str());
#endif  // OBJECT_PRINT
  }
  // Bool32x4
  {
    bool lanes[4] = {true, false, true, false};

    Handle value = factory->NewBool32x4(lanes);
    CHECK(value->IsBool32x4());
    CheckSimdValue(*value, lanes, false);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Bool32x4Print(os);
    CHECK_EQ("true, false, true, false", os.str());
#endif  // OBJECT_PRINT
  }
  // Int16x8
  {
    int16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8};

    Handle value = factory->NewInt16x8(lanes);
    CHECK(value->IsInt16x8());
    CheckSimdValue(*value, lanes, 32767);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Int16x8Print(os);
    CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str());
#endif  // OBJECT_PRINT
  }
  // Uint16x8
  {
    uint16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8};

    Handle value = factory->NewUint16x8(lanes);
    CHECK(value->IsUint16x8());
    CheckSimdValue(*value, lanes, 32767);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Uint16x8Print(os);
    CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str());
#endif  // OBJECT_PRINT
  }
  // Bool16x8
  {
    bool lanes[8] = {true, false, true, false, true, false, true, false};

    Handle value = factory->NewBool16x8(lanes);
    CHECK(value->IsBool16x8());
    CheckSimdValue(*value, lanes, false);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Bool16x8Print(os);
    CHECK_EQ("true, false, true, false, true, false, true, false", os.str());
#endif  // OBJECT_PRINT
  }
  // Int8x16
  {
    int8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16};
// Continuation of TEST(SimdObjects): 8x16 lane types; then TEST(Tagging)
// and the start of TEST(GarbageCollection).
    Handle value = factory->NewInt8x16(lanes);
    CHECK(value->IsInt8x16());
    CheckSimdValue(*value, lanes, 127);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Int8x16Print(os);
    CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16",
             os.str());
#endif  // OBJECT_PRINT
  }
  // Uint8x16
  {
    uint8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8,
                         9, 10, 11, 12, 13, 14, 15, 16};

    Handle value = factory->NewUint8x16(lanes);
    CHECK(value->IsUint8x16());
    CheckSimdValue(*value, lanes, 127);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Uint8x16Print(os);
    CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16",
             os.str());
#endif  // OBJECT_PRINT
  }
  // Bool8x16
  {
    bool lanes[16] = {true, false, true, false, true, false, true, false,
                      true, false, true, false, true, false, true, false};

    Handle value = factory->NewBool8x16(lanes);
    CHECK(value->IsBool8x16());
    CheckSimdValue(*value, lanes, false);

#ifdef OBJECT_PRINT
    std::ostringstream os;
    value->Bool8x16Print(os);
    CHECK_EQ(
        "true, false, true, false, true, false, true, false, true, false, "
        "true, false, true, false, true, false",
        os.str());
#endif  // OBJECT_PRINT
  }
}

// Basic Smi tagging sanity checks.
TEST(Tagging) {
  CcTest::InitializeVM();
  int request = 24;
  // Object-pointer alignment must not change an already-aligned size.
  CHECK_EQ(request, static_cast(OBJECT_POINTER_ALIGN(request)));
  CHECK(Smi::FromInt(42)->IsSmi());
  CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
  CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
}

// Verifies that objects reachable from the global object survive scavenges
// while unrooted objects may be collected.
TEST(GarbageCollection) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();

  HandleScope sc(isolate);
  // Check GC.
heap->CollectGarbage(NEW_SPACE); Handle global( CcTest::i_isolate()->context()->global_object()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle prop_namex = factory->InternalizeUtf8String("theSlotx"); Handle obj_name = factory->InternalizeUtf8String("theObject"); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); { HandleScope inner_scope(isolate); // Allocate a function and keep it in global object's property. Handle function = factory->NewFunction(name); JSReceiver::SetProperty(global, name, function, SLOPPY).Check(); // Allocate an object. Unrooted after leaving the scope. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(obj, prop_namex).ToHandleChecked()); } heap->CollectGarbage(NEW_SPACE); // Function should be alive. CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name)); // Check function is retained. Handle func_value = Object::GetProperty(global, name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); { HandleScope inner_scope(isolate); // Allocate another object, make it reachable from global. Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check(); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); } // After gc, it should survive. 
heap->CollectGarbage(NEW_SPACE); CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name)); Handle obj = Object::GetProperty(global, obj_name).ToHandleChecked(); CHECK(obj->IsJSObject()); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); } static void VerifyStringAllocation(Isolate* isolate, const char* string) { HandleScope scope(isolate); Handle s = isolate->factory()->NewStringFromUtf8( CStrVector(string)).ToHandleChecked(); CHECK_EQ(StrLength(string), s->length()); for (int index = 0; index < s->length(); index++) { CHECK_EQ(static_cast(string[index]), s->Get(index)); } } TEST(String) { CcTest::InitializeVM(); Isolate* isolate = reinterpret_cast(CcTest::isolate()); VerifyStringAllocation(isolate, "a"); VerifyStringAllocation(isolate, "ab"); VerifyStringAllocation(isolate, "abc"); VerifyStringAllocation(isolate, "abcd"); VerifyStringAllocation(isolate, "fiskerdrengen er paa havet"); } TEST(LocalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* name = "Kasper the spunky"; Handle string = factory->NewStringFromAsciiChecked(name); CHECK_EQ(StrLength(name), string->length()); } TEST(GlobalHandles) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); Handle h1; Handle h2; Handle h3; Handle h4; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); h3 = global_handles->Create(*i); h4 = global_handles->Create(*u); } // after gc, it should survive heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK((*h3)->IsString()); CHECK((*h4)->IsHeapNumber()); CHECK_EQ(*h3, *h1); 
GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h3.location()); CHECK_EQ(*h4, *h2); GlobalHandles::Destroy(h2.location()); GlobalHandles::Destroy(h4.location()); } static bool WeakPointerCleared = false; static void TestWeakGlobalHandleCallback( const v8::WeakCallbackInfo& data) { std::pair*, int>* p = reinterpret_cast*, int>*>( data.GetParameter()); if (p->second == 1234) WeakPointerCleared = true; p->first->Reset(); } TEST(WeakGlobalHandlesScavenge) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scavenge treats weak pointers as normal roots. heap->CollectGarbage(NEW_SPACE); CHECK((*h1)->IsString()); CHECK((*h2)->IsHeapNumber()); CHECK(!WeakPointerCleared); CHECK(!global_handles->IsNearDeath(h2.location())); CHECK(!global_handles->IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); GlobalHandles::Destroy(h2.location()); } TEST(WeakGlobalHandlesMark) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h1; Handle h2; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); Handle u = factory->NewNumber(1.12344); h1 = global_handles->Create(*i); h2 = global_handles->Create(*u); } // Make sure the objects are promoted. 
heap->CollectGarbage(OLD_SPACE); heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2)); std::pair*, int> handle_and_id(&h2, 1234); GlobalHandles::MakeWeak( h2.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); CHECK(!GlobalHandles::IsNearDeath(h1.location())); CHECK(!GlobalHandles::IsNearDeath(h2.location())); // Incremental marking potentially marked handles before they turned weak. heap->CollectAllGarbage(); CHECK((*h1)->IsString()); CHECK(WeakPointerCleared); CHECK(!GlobalHandles::IsNearDeath(h1.location())); GlobalHandles::Destroy(h1.location()); } TEST(DeleteWeakGlobalHandle) { i::FLAG_stress_compaction = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); GlobalHandles* global_handles = isolate->global_handles(); WeakPointerCleared = false; Handle h; { HandleScope scope(isolate); Handle i = factory->NewStringFromStaticChars("fisk"); h = global_handles->Create(*i); } std::pair*, int> handle_and_id(&h, 1234); GlobalHandles::MakeWeak(h.location(), reinterpret_cast(&handle_and_id), &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter); // Scanvenge does not recognize weak reference. heap->CollectGarbage(NEW_SPACE); CHECK(!WeakPointerCleared); // Mark-compact treats weak reference properly. 
heap->CollectGarbage(OLD_SPACE); CHECK(WeakPointerCleared); } TEST(DoNotPromoteWhiteObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle white = factory->NewStringFromStaticChars("white"); CHECK(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*white)))); heap->CollectGarbage(NEW_SPACE); CHECK(heap->InNewSpace(*white)); } TEST(PromoteGreyOrBlackObjectsOnScavenge) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); Handle marked = factory->NewStringFromStaticChars("marked"); IncrementalMarking* marking = heap->incremental_marking(); marking->Stop(); heap->StartIncrementalMarking(); while (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(*marked)))) { marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD, IncrementalMarking::FORCE_MARKING, IncrementalMarking::DO_NOT_FORCE_COMPLETION); } heap->CollectGarbage(NEW_SPACE); CHECK(!heap->InNewSpace(*marked)); } TEST(BytecodeArray) { static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a}; static const int kRawBytesSize = sizeof(kRawBytes); static const int kFrameSize = 32; static const int kParameterCount = 2; i::FLAG_manual_evacuation_candidates_selection = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); Factory* factory = isolate->factory(); HandleScope scope(isolate); heap::SimulateFullSpace(heap->old_space()); Handle constant_pool = factory->NewFixedArray(5, TENURED); for (int i = 0; i < 5; i++) { Handle number = factory->NewHeapNumber(i); constant_pool->set(i, *number); } // Allocate and initialize BytecodeArray Handle array = factory->NewBytecodeArray( kRawBytesSize, kRawBytes, kFrameSize, kParameterCount, constant_pool); CHECK(array->IsBytecodeArray()); CHECK_EQ(array->length(), 
(int)sizeof(kRawBytes)); CHECK_EQ(array->frame_size(), kFrameSize); CHECK_EQ(array->parameter_count(), kParameterCount); CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_LE(array->address(), array->GetFirstBytecodeAddress()); CHECK_GE(array->address() + array->BytecodeArraySize(), array->GetFirstBytecodeAddress() + array->length()); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); CHECK_EQ(array->get(i), kRawBytes[i]); } FixedArray* old_constant_pool_address = *constant_pool; // Perform a full garbage collection and force the constant pool to be on an // evacuation candidate. Page* evac_page = Page::FromAddress(constant_pool->address()); evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); heap->CollectAllGarbage(); // BytecodeArray should survive. CHECK_EQ(array->length(), kRawBytesSize); CHECK_EQ(array->frame_size(), kFrameSize); for (int i = 0; i < kRawBytesSize; i++) { CHECK_EQ(array->get(i), kRawBytes[i]); CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]); } // Constant pool should have been migrated. 
CHECK_EQ(array->constant_pool(), *constant_pool); CHECK_NE(array->constant_pool(), old_constant_pool_address); } static const char* not_so_random_string_table[] = { "abstract", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "var", "void", "volatile", "while", "with", 0 }; static void CheckInternalizedStrings(const char** strings) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); for (const char* string = *strings; *strings != 0; string = *strings++) { HandleScope scope(isolate); Handle a = isolate->factory()->InternalizeUtf8String(CStrVector(string)); // InternalizeUtf8String may return a failure if a GC is needed. 
CHECK(a->IsInternalizedString()); Handle b = factory->InternalizeUtf8String(string); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); b = isolate->factory()->InternalizeUtf8String(CStrVector(string)); CHECK_EQ(*b, *a); CHECK(b->IsUtf8EqualTo(CStrVector(string))); } } TEST(StringTable) { CcTest::InitializeVM(); v8::HandleScope sc(CcTest::isolate()); CheckInternalizedStrings(not_so_random_string_table); CheckInternalizedStrings(not_so_random_string_table); } TEST(FunctionAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle twenty_three(Smi::FromInt(23), isolate); Handle twenty_four(Smi::FromInt(24), isolate); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check that we can add properties to function objects. 
JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(24), *Object::GetProperty(function, prop_name).ToHandleChecked()); } TEST(ObjectProperties) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); // check for empty CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); // delete first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete first and then second CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // add first and then second JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); 
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second)); // delete second and then first CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY)); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first)); CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second)); // check string and internalized string match const char* string1 = "fisk"; Handle s1 = factory->NewStringFromAsciiChecked(string1); JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check(); Handle s1_string = factory->InternalizeUtf8String(string1); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string)); // check internalized string and string match const char* string2 = "fugl"; Handle s2_string = factory->InternalizeUtf8String(string2); JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check(); Handle s2 = factory->NewStringFromAsciiChecked(string2); CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2)); } TEST(JSObjectMaps) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("theFunction"); Handle function = factory->NewFunction(name); Handle prop_name = factory->InternalizeUtf8String("theSlot"); Handle obj = factory->NewJSObject(function); Handle initial_map(function->initial_map()); // Set a propery Handle twenty_three(Smi::FromInt(23), isolate); JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(23), *Object::GetProperty(obj, prop_name).ToHandleChecked()); // Check the map has changed CHECK(*initial_map != obj->map()); } TEST(JSArray) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle name = factory->InternalizeUtf8String("Array"); Handle fun_obj = 
Object::GetProperty( CcTest::i_isolate()->global_object(), name).ToHandleChecked(); Handle function = Handle::cast(fun_obj); // Allocate the object. Handle element; Handle object = factory->NewJSObject(function); Handle array = Handle::cast(object); // We just initialized the VM, no heap allocation failure yet. JSArray::Initialize(array, 0); // Set array length to 0. JSArray::SetLength(array, 0); CHECK_EQ(Smi::FromInt(0), array->length()); // Must be in fast mode. CHECK(array->HasFastSmiOrObjectElements()); // array[length] = name. JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check(); CHECK_EQ(Smi::FromInt(1), array->length()); element = i::Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); // Set array length with larger than smi value. JSArray::SetLength(array, static_cast(Smi::kMaxValue) + 1); uint32_t int_length = 0; CHECK(array->length()->ToArrayIndex(&int_length)); CHECK_EQ(static_cast(Smi::kMaxValue) + 1, int_length); CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. 
JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check(); uint32_t new_int_length = 0; CHECK(array->length()->ToArrayIndex(&new_int_length)); CHECK_EQ(static_cast(int_length), new_int_length - 1); element = Object::GetElement(isolate, array, int_length).ToHandleChecked(); CHECK_EQ(*element, *name); element = Object::GetElement(isolate, array, 0).ToHandleChecked(); CHECK_EQ(*element, *name); } TEST(JSObjectCopy) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope sc(CcTest::isolate()); Handle object_string(String::cast(CcTest::heap()->Object_string())); Handle object = Object::GetProperty( CcTest::i_isolate()->global_object(), object_string).ToHandleChecked(); Handle constructor = Handle::cast(object); Handle obj = factory->NewJSObject(constructor); Handle first = factory->InternalizeUtf8String("first"); Handle second = factory->InternalizeUtf8String("second"); Handle one(Smi::FromInt(1), isolate); Handle two(Smi::FromInt(2), isolate); JSReceiver::SetProperty(obj, first, one, SLOPPY).Check(); JSReceiver::SetProperty(obj, second, two, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check(); JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check(); // Make the clone. 
Handle value1, value2; Handle clone = factory->CopyJSObject(obj); CHECK(!clone.is_identical_to(obj)); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); // Flip the values. JSReceiver::SetProperty(clone, first, two, SLOPPY).Check(); JSReceiver::SetProperty(clone, second, one, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check(); JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check(); value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetElement(isolate, obj, 0).ToHandleChecked(); value2 = Object::GetElement(isolate, clone, 1).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, second).ToHandleChecked(); value2 = Object::GetProperty(clone, first).ToHandleChecked(); CHECK_EQ(*value1, *value2); value1 = Object::GetProperty(obj, first).ToHandleChecked(); value2 = Object::GetProperty(clone, second).ToHandleChecked(); CHECK_EQ(*value1, *value2); } TEST(StringAllocation) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 }; for (int length = 0; length < 100; length++) { v8::HandleScope scope(CcTest::isolate()); char* non_one_byte = NewArray(3 * length + 1); char* one_byte = NewArray(length + 1); non_one_byte[3 * length] = 0; 
one_byte[length] = 0; for (int i = 0; i < length; i++) { one_byte[i] = 'a'; non_one_byte[3 * i] = chars[0]; non_one_byte[3 * i + 1] = chars[1]; non_one_byte[3 * i + 2] = chars[2]; } Handle non_one_byte_sym = factory->InternalizeUtf8String( Vector(non_one_byte, 3 * length)); CHECK_EQ(length, non_one_byte_sym->length()); Handle one_byte_sym = factory->InternalizeOneByteString(OneByteVector(one_byte, length)); CHECK_EQ(length, one_byte_sym->length()); Handle non_one_byte_str = factory->NewStringFromUtf8(Vector(non_one_byte, 3 * length)) .ToHandleChecked(); non_one_byte_str->Hash(); CHECK_EQ(length, non_one_byte_str->length()); Handle one_byte_str = factory->NewStringFromUtf8(Vector(one_byte, length)) .ToHandleChecked(); one_byte_str->Hash(); CHECK_EQ(length, one_byte_str->length()); DeleteArray(non_one_byte); DeleteArray(one_byte); } } static int ObjectsFoundInHeap(Heap* heap, Handle objs[], int size) { // Count the number of objects found in the heap. int found_count = 0; HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { for (int i = 0; i < size; i++) { if (*objs[i] == obj) { found_count++; } } } return found_count; } TEST(Iteration) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); // Array of objects to scan haep for. const int objs_count = 6; Handle objs[objs_count]; int next_objs_index = 0; // Allocate a JS array to OLD_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewJSArray(10); objs[next_objs_index++] = factory->NewJSArray(10, FAST_HOLEY_ELEMENTS, TENURED); // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij"); objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij", TENURED); // Allocate a large string (for large object space). 
int large_size = Page::kMaxRegularHeapObjectSize + 1; char* str = new char[large_size]; for (int i = 0; i < large_size - 1; ++i) str[i] = 'a'; str[large_size - 1] = '\0'; objs[next_objs_index++] = factory->NewStringFromAsciiChecked(str, TENURED); delete[] str; // Add a Map object to look for. objs[next_objs_index++] = Handle(HeapObject::cast(*objs[0])->map()); CHECK_EQ(objs_count, next_objs_index); CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count)); } UNINITIALIZED_TEST(TestCodeFlushing) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; v8::Isolate::CreateParams create_params; create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); i::Isolate* i_isolate = reinterpret_cast(isolate); isolate->Enter(); Factory* factory = i_isolate->factory(); { v8::HandleScope scope(isolate); v8::Context::New(isolate)->Enter(); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(isolate); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(i_isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. i_isolate->heap()->CollectAllGarbage(); i_isolate->heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. 
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { i_isolate->heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } isolate->Exit(); isolate->Dispose(); } TEST(TestCodeFlushingPreAged) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // Compile foo, but don't run it. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code has been run so will survive at least one GC. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // The code was only run once, so it should be pre-aged and collected on the // next GC. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); // Execute the function again twice, and ensure it is reset to the young age. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();" "foo();"); } // The code will survive at least two GC now that it is young again. 
CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use full marking. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { CcTest::heap()->CollectAllGarbage(); } // foo should no longer be in the compilation cache CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // Call foo to get it recompiled. CompileRun("foo()"); CHECK(function->shared()->is_compiled()); CHECK(function->is_compiled()); } TEST(TestCodeFlushingIncremental) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Simulate several GCs that use incremental marking. 
const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); CcTest::heap()->CollectAllGarbage(); } CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); // This compile will compile the function again. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo();"); } // Simulate several GCs that use incremental marking but make sure // the loop breaks once the function is enqueued as a candidate. for (int i = 0; i < kAgingThreshold; i++) { heap::SimulateIncrementalMarking(CcTest::heap()); if (!function->next_function_link()->IsUndefined(CcTest::i_isolate())) break; CcTest::heap()->CollectAllGarbage(); } // Force optimization while incremental marking is active and while // the function is enqueued as a candidate. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(function->shared()->is_compiled() || !function->IsOptimized()); CHECK(function->is_compiled() || !function->IsOptimized()); } TEST(TestCodeFlushingIncrementalScavenge) { // If we do not flush code this test is invalid. if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); const char* source = "var foo = function() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo();" "var bar = function() {" " var x = 23;" "};" "bar();"; Handle foo_name = factory->InternalizeUtf8String("foo"); Handle bar_name = factory->InternalizeUtf8String("bar"); // Perfrom one initial GC to enable code flushing. CcTest::heap()->CollectAllGarbage(); // This compile will add the code to the compilation cache. 
{ v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check functions are compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); Handle func_value2 = Object::GetProperty(isolate->global_object(), bar_name).ToHandleChecked(); CHECK(func_value2->IsJSFunction()); Handle function2 = Handle::cast(func_value2); CHECK(function2->shared()->is_compiled()); // Clear references to functions so that one of them can die. { v8::HandleScope scope(CcTest::isolate()); CompileRun("foo = 0; bar = 0;"); } // Bump the code age so that flushing is triggered while the function // object is still located in new-space. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); function2->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the functions are enqueued as // code flushing candidates. Then kill one of the functions. Finally // perform a scavenge while incremental marking is still running. heap::SimulateIncrementalMarking(CcTest::heap()); *function2.location() = NULL; CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking"); // Simulate one final GC to make sure the candidate queue is sane. CcTest::heap()->CollectAllGarbage(); CHECK(!function->shared()->is_compiled() || function->IsOptimized()); CHECK(!function->is_compiled() || function->IsOptimized()); } TEST(TestCodeFlushingIncrementalAbort) { // If we do not flush code this test is invalid. 
if (!FLAG_flush_code) return; i::FLAG_allow_natives_syntax = true; i::FLAG_optimize_for_size = false; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); v8::HandleScope scope(CcTest::isolate()); const char* source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle foo_name = factory->InternalizeUtf8String("foo"); // This compile will add the code to the compilation cache. { v8::HandleScope scope(CcTest::isolate()); CompileRun(source); } // Check function is compiled. Handle func_value = Object::GetProperty(isolate->global_object(), foo_name).ToHandleChecked(); CHECK(func_value->IsJSFunction()); Handle function = Handle::cast(func_value); CHECK(function->shared()->is_compiled()); // The code will survive at least two GCs. heap->CollectAllGarbage(); heap->CollectAllGarbage(); CHECK(function->shared()->is_compiled()); // Bump the code age so that flushing is triggered. const int kAgingThreshold = 6; for (int i = 0; i < kAgingThreshold; i++) { function->shared()->code()->MakeOlder(static_cast(i % 2)); } // Simulate incremental marking so that the function is enqueued as // code flushing candidate. heap::SimulateIncrementalMarking(heap); // Enable the debugger and add a breakpoint while incremental marking // is running so that incremental marking aborts and code flushing is // disabled. int position = 0; Handle breakpoint_object(Smi::FromInt(0), isolate); EnableDebugger(CcTest::isolate()); isolate->debug()->SetBreakPoint(function, breakpoint_object, &position); isolate->debug()->ClearAllBreakPoints(); DisableDebugger(CcTest::isolate()); // Force optimization now that code flushing is disabled. { v8::HandleScope scope(CcTest::isolate()); CompileRun("%OptimizeFunctionOnNextCall(foo); foo();"); } // Simulate one final GC to make sure the candidate queue is sane. 
heap->CollectAllGarbage(); CHECK(function->shared()->is_compiled() || !function->IsOptimized()); CHECK(function->is_compiled() || !function->IsOptimized()); } TEST(TestUseOfIncrementalBarrierOnCompileLazy) { // Turn off always_opt because it interferes with running the built-in for // the last call to g(). i::FLAG_always_opt = false; i::FLAG_allow_natives_syntax = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); v8::HandleScope scope(CcTest::isolate()); CompileRun( "function make_closure(x) {" " return function() { return x + 3 };" "}" "var f = make_closure(5); f();" "var g = make_closure(5);"); // Check f is compiled. Handle f_name = factory->InternalizeUtf8String("f"); Handle f_value = Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked(); Handle f_function = Handle::cast(f_value); CHECK(f_function->is_compiled()); // Check g is not compiled. Handle g_name = factory->InternalizeUtf8String("g"); Handle g_value = Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked(); Handle g_function = Handle::cast(g_value); CHECK(!g_function->is_compiled()); heap::SimulateIncrementalMarking(heap); CompileRun("%OptimizeFunctionOnNextCall(f); f();"); // g should now have available an optimized function, unmarked by gc. The // CompileLazy built-in will discover it and install it in the closure, and // the incremental write barrier should be used. CompileRun("g();"); CHECK(g_function->is_compiled()); } TEST(CompilationCacheCachingBehavior) { // If we do not flush code, or have the compilation cache turned off, this // test is invalid. 
if (!FLAG_flush_code || !FLAG_compilation_cache) { return; } CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); CompilationCache* compilation_cache = isolate->compilation_cache(); LanguageMode language_mode = construct_language_mode(FLAG_use_strict); v8::HandleScope scope(CcTest::isolate()); const char* raw_source = "function foo() {" " var x = 42;" " var y = 42;" " var z = x + y;" "};" "foo()"; Handle source = factory->InternalizeUtf8String(raw_source); Handle native_context = isolate->native_context(); { v8::HandleScope scope(CcTest::isolate()); CompileRun(raw_source); } // The script should be in the cache now. MaybeHandle info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); // Check that the code cache entry survives at least on GC. // (Unless --optimize-for-size, in which case it might get collected // immediately.) if (!FLAG_optimize_for_size) { heap->CollectAllGarbage(); info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(!info.is_null()); } // Progress code age until it's old and ready for GC. while (!info.ToHandleChecked()->code()->IsOld()) { // To guarantee progress, we have to MakeOlder with different parities. // We can't just use NO_MARKING_PARITY, since e.g. kExecutedOnceCodeAge is // always NO_MARKING_PARITY and the code age only progresses if the parity // is different. info.ToHandleChecked()->code()->MakeOlder(ODD_MARKING_PARITY); info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY); } heap->CollectAllGarbage(); // Ensure code aging cleared the entry from the cache. 
info = compilation_cache->LookupScript( source, Handle(), 0, 0, v8::ScriptOriginOptions(false, true, false), native_context, language_mode); CHECK(info.is_null()); } static void OptimizeEmptyFunction(const char* name) { HandleScope scope(CcTest::i_isolate()); EmbeddedVector source; SNPrintF(source, "function %s() { return 0; }" "%s(); %s();" "%%OptimizeFunctionOnNextCall(%s);" "%s();", name, name, name, name, name); CompileRun(source.start()); } // Count the number of native contexts in the weak list of native contexts. int CountNativeContexts() { int count = 0; Object* object = CcTest::heap()->native_contexts_list(); while (!object->IsUndefined(CcTest::i_isolate())) { count++; object = Context::cast(object)->next_context_link(); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context. static int CountOptimizedUserFunctions(v8::Local context) { int count = 0; Handle icontext = v8::Utils::OpenHandle(*context); Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST); while (object->IsJSFunction() && !JSFunction::cast(object)->shared()->IsBuiltin()) { count++; object = JSFunction::cast(object)->next_function_link(); } return count; } TEST(TestInternalWeakLists) { FLAG_always_opt = false; FLAG_allow_natives_syntax = true; v8::V8::Initialize(); // Some flags turn Scavenge collections into Mark-sweep collections // and hence are incompatible with this test case. if (FLAG_gc_global || FLAG_stress_compaction) return; FLAG_retain_maps_for_n_gc = 0; static const int kNumTestContexts = 10; Isolate* isolate = CcTest::i_isolate(); Heap* heap = isolate->heap(); HandleScope scope(isolate); v8::Local ctx[kNumTestContexts]; if (!isolate->use_crankshaft()) return; CHECK_EQ(0, CountNativeContexts()); // Create a number of global contests which gets linked together. 
for (int i = 0; i < kNumTestContexts; i++) { ctx[i] = v8::Context::New(CcTest::isolate()); // Collect garbage that might have been created by one of the // installed extensions. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(i + 1, CountNativeContexts()); ctx[i]->Enter(); // Create a handle scope so no function objects get stuck in the outer // handle scope. HandleScope scope(isolate); CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f1"); CHECK_EQ(1, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f2"); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f3"); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f4"); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); OptimizeEmptyFunction("f5"); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); // Remove function f1, and CompileRun("f1=null"); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[i])); } // Mark compact handles the weak references. isolate->compilation_cache()->Clear(); heap->CollectAllGarbage(); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); // Get rid of f3 and f5 in the same way. CompileRun("f3=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); CompileRun("f5=null"); for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(NEW_SPACE); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i])); } CcTest::heap()->CollectAllGarbage(); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i])); ctx[i]->Exit(); } // Force compilation cache cleanup. CcTest::heap()->NotifyContextDisposed(true); CcTest::heap()->CollectAllGarbage(); // Dispose the native contexts one by one. 
for (int i = 0; i < kNumTestContexts; i++) { // TODO(dcarney): is there a better way to do this? i::Object** unsafe = reinterpret_cast(*ctx[i]); *unsafe = CcTest::heap()->undefined_value(); ctx[i].Clear(); // Scavenge treats these references as strong. for (int j = 0; j < 10; j++) { CcTest::heap()->CollectGarbage(i::NEW_SPACE); CHECK_EQ(kNumTestContexts - i, CountNativeContexts()); } // Mark compact handles the weak references. CcTest::heap()->CollectAllGarbage(); CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts()); } CHECK_EQ(0, CountNativeContexts()); } // Count the number of native contexts in the weak list of native contexts // causing a GC after the specified number of elements. static int CountNativeContextsWithGC(Isolate* isolate, int n) { Heap* heap = isolate->heap(); int count = 0; Handle object(heap->native_contexts_list(), isolate); while (!object->IsUndefined(isolate)) { count++; if (count == n) heap->CollectAllGarbage(); object = Handle(Context::cast(*object)->next_context_link(), isolate); } return count; } // Count the number of user functions in the weak list of optimized // functions attached to a native context causing a GC after the // specified number of elements. 
static int CountOptimizedUserFunctionsWithGC(v8::Local context, int n) { int count = 0; Handle icontext = v8::Utils::OpenHandle(*context); Isolate* isolate = icontext->GetIsolate(); Handle object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST), isolate); while (object->IsJSFunction() && !Handle::cast(object)->shared()->IsBuiltin()) { count++; if (count == n) isolate->heap()->CollectAllGarbage(); object = Handle( Object::cast(JSFunction::cast(*object)->next_function_link()), isolate); } return count; } TEST(TestInternalWeakListsTraverseWithGC) { FLAG_always_opt = false; FLAG_allow_natives_syntax = true; v8::V8::Initialize(); static const int kNumTestContexts = 10; Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); v8::Local ctx[kNumTestContexts]; if (!isolate->use_crankshaft()) return; CHECK_EQ(0, CountNativeContexts()); // Create an number of contexts and check the length of the weak list both // with and without GCs while iterating the list. for (int i = 0; i < kNumTestContexts; i++) { ctx[i] = v8::Context::New(CcTest::isolate()); CHECK_EQ(i + 1, CountNativeContexts()); CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1)); } ctx[0]->Enter(); // Compile a number of functions the length of the weak list of optimized // functions both with and without GCs while iterating the list. 
CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0])); OptimizeEmptyFunction("f1"); CHECK_EQ(1, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(1, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f2"); CHECK_EQ(2, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(2, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f3"); CHECK_EQ(3, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(3, CountOptimizedUserFunctionsWithGC(ctx[0], 1)); OptimizeEmptyFunction("f4"); CHECK_EQ(4, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(4, CountOptimizedUserFunctionsWithGC(ctx[0], 2)); OptimizeEmptyFunction("f5"); CHECK_EQ(5, CountOptimizedUserFunctions(ctx[0])); CHECK_EQ(5, CountOptimizedUserFunctionsWithGC(ctx[0], 4)); ctx[0]->Exit(); } TEST(TestSizeOfRegExpCode) { if (!FLAG_regexp_optimization) return; v8::V8::Initialize(); Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); LocalContext context; // Adjust source below and this check to match // RegExpImple::kRegExpTooLargeToOptimize. CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB); // Compile a regexp that is much larger if we are using regexp optimizations. CompileRun( "var reg_exp_source = '(?:a|bc|def|ghij|klmno|pqrstu)';" "var half_size_reg_exp;" "while (reg_exp_source.length < 20 * 1024) {" " half_size_reg_exp = reg_exp_source;" " reg_exp_source = reg_exp_source + reg_exp_source;" "}" // Flatten string. "reg_exp_source.match(/f/);"); // Get initial heap size after several full GCs, which will stabilize // the heap size and return with sweeping finished completely. 
CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector(); if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } int initial_size = static_cast(CcTest::heap()->SizeOfObjects()); CompileRun("'foo'.match(reg_exp_source);"); CcTest::heap()->CollectAllGarbage(); int size_with_regexp = static_cast(CcTest::heap()->SizeOfObjects()); CompileRun("'foo'.match(half_size_reg_exp);"); CcTest::heap()->CollectAllGarbage(); int size_with_optimized_regexp = static_cast(CcTest::heap()->SizeOfObjects()); int size_of_regexp_code = size_with_regexp - initial_size; // On some platforms the debug-code flag causes huge amounts of regexp code // to be emitted, breaking this test. if (!FLAG_debug_code) { CHECK_LE(size_of_regexp_code, 1 * MB); } // Small regexp is half the size, but compiles to more than twice the code // due to the optimization steps. CHECK_GE(size_with_optimized_regexp, size_with_regexp + size_of_regexp_code * 2); } HEAP_TEST(TestSizeOfObjects) { v8::V8::Initialize(); // Get initial heap size after several full GCs, which will stabilize // the heap size and return with sweeping finished completely. CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); CcTest::heap()->CollectAllGarbage(); MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector(); if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } int initial_size = static_cast(CcTest::heap()->SizeOfObjects()); { // Allocate objects on several different old-space pages so that // concurrent sweeper threads will be busy sweeping the old space on // subsequent GC runs. 
AlwaysAllocateScope always_allocate(CcTest::i_isolate()); int filler_size = static_cast(FixedArray::SizeFor(8192)); for (int i = 1; i <= 100; i++) { CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked(); CHECK_EQ(initial_size + i * filler_size, static_cast(CcTest::heap()->SizeOfObjects())); } } // The heap size should go back to initial size after a full GC, even // though sweeping didn't finish yet. CcTest::heap()->CollectAllGarbage(); // Normally sweeping would not be complete here, but no guarantees. CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects())); // Waiting for sweeper threads should not change heap size. if (collector->sweeping_in_progress()) { collector->EnsureSweepingCompleted(); } CHECK_EQ(initial_size, static_cast(CcTest::heap()->SizeOfObjects())); } TEST(TestAlignmentCalculations) { // Maximum fill amounts are consistent. int maximum_double_misalignment = kDoubleSize - kPointerSize; int maximum_simd128_misalignment = kSimd128Size - kPointerSize; int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned); CHECK_EQ(0, max_word_fill); int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned); CHECK_EQ(maximum_double_misalignment, max_double_fill); int max_double_unaligned_fill = Heap::GetMaximumFillToAlign(kDoubleUnaligned); CHECK_EQ(maximum_double_misalignment, max_double_unaligned_fill); int max_simd128_unaligned_fill = Heap::GetMaximumFillToAlign(kSimd128Unaligned); CHECK_EQ(maximum_simd128_misalignment, max_simd128_unaligned_fill); Address base = static_cast(NULL); int fill = 0; // Word alignment never requires fill. fill = Heap::GetFillToAlign(base, kWordAligned); CHECK_EQ(0, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kWordAligned); CHECK_EQ(0, fill); // No fill is required when address is double aligned. fill = Heap::GetFillToAlign(base, kDoubleAligned); CHECK_EQ(0, fill); // Fill is required if address is not double aligned. 
fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleAligned); CHECK_EQ(maximum_double_misalignment, fill); // kDoubleUnaligned has the opposite fill amounts. fill = Heap::GetFillToAlign(base, kDoubleUnaligned); CHECK_EQ(maximum_double_misalignment, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kDoubleUnaligned); CHECK_EQ(0, fill); // 128 bit SIMD types have 2 or 4 possible alignments, depending on platform. fill = Heap::GetFillToAlign(base, kSimd128Unaligned); CHECK_EQ((3 * kPointerSize) & kSimd128AlignmentMask, fill); fill = Heap::GetFillToAlign(base + kPointerSize, kSimd128Unaligned); CHECK_EQ((2 * kPointerSize) & kSimd128AlignmentMask, fill); fill = Heap::GetFillToAlign(base + 2 * kPointerSize, kSimd128Unaligned); CHECK_EQ(kPointerSize, fill); fill = Heap::GetFillToAlign(base + 3 * kPointerSize, kSimd128Unaligned); CHECK_EQ(0, fill); } static HeapObject* NewSpaceAllocateAligned(int size, AllocationAlignment alignment) { Heap* heap = CcTest::heap(); AllocationResult allocation = heap->new_space()->AllocateRawAligned(size, alignment); HeapObject* obj = NULL; allocation.To(&obj); heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo); return obj; } // Get new space allocation into the desired alignment. static Address AlignNewSpace(AllocationAlignment alignment, int offset) { Address* top_addr = CcTest::heap()->new_space()->allocation_top_address(); int fill = Heap::GetFillToAlign(*top_addr, alignment); if (fill) { NewSpaceAllocateAligned(fill + offset, kWordAligned); } return *top_addr; } TEST(TestAlignedAllocation) { // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. const intptr_t double_misalignment = kDoubleSize - kPointerSize; Address* top_addr = CcTest::heap()->new_space()->allocation_top_address(); Address start; HeapObject* obj; HeapObject* filler; if (double_misalignment) { // Allocate a pointer sized object that must be double aligned at an // aligned address. 
start = AlignNewSpace(kDoubleAligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); // There is no filler. CHECK_EQ(kPointerSize, *top_addr - start); // Allocate a second pointer sized object that must be double aligned at an // unaligned address. start = AlignNewSpace(kDoubleAligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start); // Similarly for kDoubleUnaligned. start = AlignNewSpace(kDoubleUnaligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); CHECK_EQ(kPointerSize, *top_addr - start); start = AlignNewSpace(kDoubleUnaligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + double_misalignment, *top_addr - start); } // Now test SIMD alignment. There are 2 or 4 possible alignments, depending // on platform. start = AlignNewSpace(kSimd128Unaligned, 0); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is no filler. CHECK_EQ(kPointerSize, *top_addr - start); start = AlignNewSpace(kSimd128Unaligned, kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. 
filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kSimd128Size - kPointerSize); CHECK_EQ(kPointerSize + kSimd128Size - kPointerSize, *top_addr - start); if (double_misalignment) { // Test the 2 other alignments possible on 32 bit platforms. start = AlignNewSpace(kSimd128Unaligned, 2 * kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == 2 * kPointerSize); CHECK_EQ(kPointerSize + 2 * kPointerSize, *top_addr - start); start = AlignNewSpace(kSimd128Unaligned, 3 * kPointerSize); obj = NewSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler = HeapObject::FromAddress(start); CHECK(obj != filler && filler->IsFiller() && filler->Size() == kPointerSize); CHECK_EQ(kPointerSize + kPointerSize, *top_addr - start); } } static HeapObject* OldSpaceAllocateAligned(int size, AllocationAlignment alignment) { Heap* heap = CcTest::heap(); AllocationResult allocation = heap->old_space()->AllocateRawAligned(size, alignment); HeapObject* obj = NULL; allocation.To(&obj); heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo); return obj; } // Get old space allocation into the desired alignment. static Address AlignOldSpace(AllocationAlignment alignment, int offset) { Address* top_addr = CcTest::heap()->old_space()->allocation_top_address(); int fill = Heap::GetFillToAlign(*top_addr, alignment); int allocation = fill + offset; if (allocation) { OldSpaceAllocateAligned(allocation, kWordAligned); } Address top = *top_addr; // Now force the remaining allocation onto the free list. 
CcTest::heap()->old_space()->EmptyAllocationInfo(); return top; } // Test the case where allocation must be done from the free list, so filler // may precede or follow the object. TEST(TestAlignedOverAllocation) { // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. const intptr_t double_misalignment = kDoubleSize - kPointerSize; Address start; HeapObject* obj; HeapObject* filler1; HeapObject* filler2; if (double_misalignment) { start = AlignOldSpace(kDoubleAligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. start = AlignOldSpace(kDoubleAligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment)); filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1); CHECK(filler1->IsFiller()); CHECK(filler1->Size() == kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Similarly for kDoubleUnaligned. start = AlignOldSpace(kDoubleUnaligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); // The object is aligned, and a filler object is created after. CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); // Try the opposite alignment case. 
start = AlignOldSpace(kDoubleUnaligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned); CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize)); filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); } // Now test SIMD alignment. There are 2 or 4 possible alignments, depending // on platform. start = AlignOldSpace(kSimd128Unaligned, 0); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object after the object. filler1 = HeapObject::FromAddress(start + kPointerSize); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kSimd128Size - kPointerSize); start = AlignOldSpace(kSimd128Unaligned, kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There is a filler object before the object. filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kSimd128Size - kPointerSize); if (double_misalignment) { // Test the 2 other alignments possible on 32 bit platforms. start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There are filler objects before and after the object. 
filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == 2 * kPointerSize); filler2 = HeapObject::FromAddress(start + 3 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == kPointerSize); start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize); obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned); CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize)); // There are filler objects before and after the object. filler1 = HeapObject::FromAddress(start); CHECK(obj != filler1 && filler1->IsFiller() && filler1->Size() == kPointerSize); filler2 = HeapObject::FromAddress(start + 2 * kPointerSize); CHECK(obj != filler2 && filler2->IsFiller() && filler2->Size() == 2 * kPointerSize); } } TEST(TestSizeOfObjectsVsHeapIteratorPrecision) { CcTest::InitializeVM(); HeapIterator iterator(CcTest::heap()); intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects(); intptr_t size_of_objects_2 = 0; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (!obj->IsFreeSpace()) { size_of_objects_2 += obj->Size(); } } // Delta must be within 5% of the larger result. // TODO(gc): Tighten this up by distinguishing between byte // arrays that are real and those that merely mark free space // on the heap. if (size_of_objects_1 > size_of_objects_2) { intptr_t delta = size_of_objects_1 - size_of_objects_2; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_1 / 20, delta); } else { intptr_t delta = size_of_objects_2 - size_of_objects_1; PrintF("Heap::SizeOfObjects: %" V8PRIdPTR ", " "Iterator: %" V8PRIdPTR ", " "delta: %" V8PRIdPTR "\n", size_of_objects_1, size_of_objects_2, delta); CHECK_GT(size_of_objects_2 / 20, delta); } } static void FillUpNewSpace(NewSpace* new_space) { // Fill up new space to the point that it is completely full. 
Make sure // that the scavenger does not undo the filling. Heap* heap = new_space->heap(); Isolate* isolate = heap->isolate(); Factory* factory = isolate->factory(); HandleScope scope(isolate); AlwaysAllocateScope always_allocate(isolate); intptr_t available = new_space->Capacity() - new_space->Size(); intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1; for (intptr_t i = 0; i < number_of_fillers; i++) { CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED))); } } TEST(GrowAndShrinkNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); NewSpace* new_space = heap->new_space(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } // Explicitly growing should double the space capacity. intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); old_capacity = new_space->TotalCapacity(); FillUpNewSpace(new_space); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Explicitly shrinking should not affect space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); // Let the scavenger empty the new space. heap->CollectGarbage(NEW_SPACE); CHECK_LE(new_space->Size(), old_capacity); // Explicitly shrinking should halve the space capacity. old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == 2 * new_capacity); // Consecutive shrinking should not affect space capacity. 
old_capacity = new_space->TotalCapacity(); new_space->Shrink(); new_space->Shrink(); new_space->Shrink(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } TEST(CollectingAllAvailableGarbageShrinksNewSpace) { CcTest::InitializeVM(); Heap* heap = CcTest::heap(); if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { return; } v8::HandleScope scope(CcTest::isolate()); NewSpace* new_space = heap->new_space(); intptr_t old_capacity, new_capacity; old_capacity = new_space->TotalCapacity(); new_space->Grow(); new_capacity = new_space->TotalCapacity(); CHECK(2 * old_capacity == new_capacity); FillUpNewSpace(new_space); heap->CollectAllAvailableGarbage(); new_capacity = new_space->TotalCapacity(); CHECK(old_capacity == new_capacity); } static int NumberOfGlobalObjects() { int count = 0; HeapIterator iterator(CcTest::heap()); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsJSGlobalObject()) count++; } return count; } // Test that we don't embed maps from foreign contexts into // optimized code. 
TEST(LeakNativeContextViaMap) {
  // Creates an object |v| in ctx1, hands it to optimized code in ctx2 as
  // |o.x|, then disposes ctx1 and checks (via global-object counting) that
  // the optimized code did not embed ctx1's map and keep ctx1 alive.
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = {x: 42}");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o.x; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Clear |o| so ctx2 no longer references the ctx1 object directly.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  // After disposing ctx1, only ctx2's global should remain ...
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  // ... and none once ctx2 is disposed too.
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Test that we don't embed functions from foreign contexts into
// optimized code.
TEST(LeakNativeContextViaFunction) {
  // Creates a closure |v| in ctx1, calls it from optimized code in ctx2,
  // then disposes ctx1 and checks that the optimized code did not embed
  // the foreign function and keep ctx1 alive.
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = function() { return 42; }");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f(x) { return x(); }"
        "for (var i = 0; i < 10; ++i) f(o);"
        "%OptimizeFunctionOnNextCall(f);"
        "f(o);");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    // Drop the direct reference to the foreign function.
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


// Same leak check as above, but the optimized code performs a keyed load
// (o[0]) on an array created in the foreign context.
TEST(LeakNativeContextViaMapKeyed) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context> ctx2p;
  {
    v8::HandleScope scope(isolate);
    ctx1p.Reset(isolate, v8::Context::New(isolate));
    ctx2p.Reset(isolate, v8::Context::New(isolate));
    v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
  }

  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(2, NumberOfGlobalObjects());

  {
    v8::HandleScope inner_scope(isolate);
    CompileRun("var v = [42, 43]");
    v8::Local<v8::Context> ctx1 = v8::Local<v8::Context>::New(isolate, ctx1p);
    v8::Local<v8::Context> ctx2 = v8::Local<v8::Context>::New(isolate, ctx2p);
    v8::Local<v8::Value> v =
        ctx1->Global()->Get(ctx1, v8_str("v")).ToLocalChecked();
    ctx2->Enter();
    CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return o[0]; }"
        "for (var i = 0; i < 10; ++i) f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    CHECK_EQ(42, res->Int32Value(ctx2).FromJust());
    CHECK(ctx2->Global()
              ->Set(ctx2, v8_str("o"), v8::Int32::New(isolate, 0))
              .FromJust());
    ctx2->Exit();
    ctx1->Exit();
    ctx1p.Reset();
    isolate->ContextDisposedNotification();
  }
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(1, NumberOfGlobalObjects());
  ctx2p.Reset();
  CcTest::heap()->CollectAllAvailableGarbage();
  CHECK_EQ(0, NumberOfGlobalObjects());
}


TEST(LeakNativeContextViaMapProto) {
  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  v8::HandleScope outer_scope(isolate);
  v8::Persistent<v8::Context> ctx1p;
  v8::Persistent<v8::Context>