Update V8 to r5091 as required by WebKit r63859.
Change-Id: I8e35d765e6f6c7f89eccff900e1cabe2d5dd6110
diff --git a/ChangeLog b/ChangeLog
index 602ad80..636d999 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,36 @@
+2010-07-19: Version 2.3.1
+
+ Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.
+
+ Fixed bug related to code flushing while compiling a lazy
+ compilable function (issue http://crbug.com/49099).
+
+ Performance improvements on all platforms.
+
+
+2010-07-15: Version 2.3.0
+
+ Added ES5 Object.seal and Object.isSealed.
+
+ Added debugger API for scheduling debugger commands from a
+ separate thread.
+
+
+2010-07-14: Version 2.2.24
+
+ Added API for capturing stack traces for uncaught exceptions.
+
+ Fixed crash bug when preparsing from a non-external V8 string
+ (issue 775).
+
+ Fixed JSON.parse bug causing input not to be converted to string
+ (issue 764).
+
+ Added ES5 Object.freeze and Object.isFrozen.
+
+ Performance improvements on all platforms.
+
+
2010-07-07: Version 2.2.23
API change: Convert Unicode code points outside the basic multilingual
@@ -11,6 +44,7 @@
Performance improvements on all platforms.
+
2010-07-05: Version 2.2.22
Added ES5 Object.isExtensible and Object.preventExtensions.
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 6ddbe61..478fbdf 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/6.0.466.1/DEPS
-http://v8.googlecode.com/svn/trunk@5033
+http://src.chromium.org/svn/releases/6.0.474.0/DEPS
+http://v8.googlecode.com/svn/trunk@5091
diff --git a/include/v8-debug.h b/include/v8-debug.h
index c53b634..414fd86 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -76,7 +76,8 @@
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
- ScriptCollected = 6
+ ScriptCollected = 6,
+ BreakForCommand = 7
};
@@ -172,6 +173,13 @@
*/
virtual Handle<Value> GetCallbackData() const = 0;
+ /**
+ * Client data passed to DebugBreakForCommand function. The
+ * debugger takes ownership of the data and will delete it even if
+ * there is no message handler.
+ */
+ virtual ClientData* GetClientData() const = 0;
+
virtual ~EventDetails() {}
};
@@ -248,6 +256,12 @@
// Break execution of JavaScript.
static void DebugBreak();
+ // Break execution of JavaScript (this method can be invoked from a
+ // non-VM thread) for further client command execution on a VM
+ // thread. Client data is then passed in EventDetails to
+ // EventCallback at the moment when the VM actually stops.
+ static void DebugBreakForCommand(ClientData* data = NULL);
+
// Message based interface. The message protocol is JSON. NOTE: the message
// handler thread is no longer supported; the parameter must be false.
static void SetMessageHandler(MessageHandler handler,
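
For illustration (not part of the patch): the new BreakForCommand event and Debug::DebugBreakForCommand work as a pair; a non-VM thread enqueues a command, and the debugger later surfaces it on the VM thread through EventDetails::GetClientData(). A minimal embedder-side sketch, assuming the EventCallback2-style listener API; MyCommand and OnDebugEvent are hypothetical names.

#include <v8-debug.h>

// Hypothetical payload; the debugger takes ownership and deletes it.
class MyCommand : public v8::Debug::ClientData {
 public:
  explicit MyCommand(int id) : id_(id) {}
  int id() const { return id_; }
 private:
  int id_;
};

static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::BreakForCommand) {
    // Runs on the VM thread once the VM has actually stopped.
    MyCommand* command = static_cast<MyCommand*>(details.GetClientData());
    // ... execute the client command, e.g. keyed off command->id() ...
  }
}

// Setup on the VM thread:
//   v8::Debug::SetDebugEventListener2(OnDebugEvent);
// From any other thread:
//   v8::Debug::DebugBreakForCommand(new MyCommand(42));
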
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 3e1952c..c99eb0d 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -258,6 +258,12 @@
*/
Handle<String> GetName() const;
+ /**
+ * Returns node id. For the same heap object, the id remains the same
+ * across all snapshots.
+ */
+ uint64_t GetId() const;
+
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@@ -290,6 +296,16 @@
};
+class V8EXPORT HeapSnapshotsDiff {
+ public:
+ /** Returns the root node for added nodes. */
+ const HeapGraphNode* GetAdditionsRoot() const;
+
+ /** Returns the root node for deleted nodes. */
+ const HeapGraphNode* GetDeletionsRoot() const;
+};
+
+
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
@@ -302,7 +318,10 @@
Handle<String> GetTitle() const;
/** Returns the root node of the heap graph. */
- const HeapGraphNode* GetHead() const;
+ const HeapGraphNode* GetRoot() const;
+
+ /** Returns a diff between this snapshot and another one. */
+ const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
};
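
For illustration (not part of the patch): a sketch of how the new profiler entry points compose, assuming two snapshots were taken earlier with v8::HeapProfiler::TakeSnapshot(); DumpDiff and the locals are hypothetical names.

#include <v8-profiler.h>

void DumpDiff(const v8::HeapSnapshot* s1, const v8::HeapSnapshot* s2) {
  // GetRoot() is the renamed GetHead(); ids are stable across snapshots,
  // so the same heap object can be tracked from snapshot to snapshot.
  uint64_t root_id = s2->GetRoot()->GetId();
  (void)root_id;

  // The structural diff exposes two synthetic roots: one for the nodes
  // added since s1 and one for the nodes deleted since s1.
  const v8::HeapSnapshotsDiff* diff = s1->CompareWith(s2);
  const v8::HeapGraphNode* added = diff->GetAdditionsRoot();
  const v8::HeapGraphNode* deleted = diff->GetDeletionsRoot();
  (void)added;
  (void)deleted;
}
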
diff --git a/include/v8.h b/include/v8.h
index ca4a247..9e4cebb 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -694,6 +694,13 @@
Handle<Value> GetScriptData() const;
/**
+ * Exception stack trace. By default stack traces are not captured for
+ * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
+ * changing this option.
+ */
+ Handle<StackTrace> GetStackTrace() const;
+
+ /**
* Returns the number, 1-based, of the line where the error occurred.
*/
int GetLineNumber() const;
@@ -2459,6 +2466,15 @@
static void RemoveMessageListeners(MessageCallback that);
/**
+ * Tells V8 to capture the current stack trace when an uncaught exception
+ * occurs and report it to the message listeners. The option is off by
+ * default.
+ */
+ static void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit = 10,
+ StackTrace::StackTraceOptions options = StackTrace::kOverview);
+
+ /**
* Sets V8 flags from a string.
*/
static void SetFlagsFromString(const char* str, int length);
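
For illustration (not part of the patch): the two v8.h additions are meant to be used together; capture is enabled globally, and the captured trace is then available on the Message passed to listeners. A minimal sketch; OnMessage is a hypothetical name.

#include <v8.h>

static void OnMessage(v8::Handle<v8::Message> message,
                      v8::Handle<v8::Value> data) {
  v8::Handle<v8::StackTrace> trace = message->GetStackTrace();
  if (!trace.IsEmpty()) {
    // At most frame_limit frames were captured at throw time.
    int frames = trace->GetFrameCount();
    (void)frames;  // ... report the frames ...
  }
}

// During setup, before any script can throw:
//   v8::V8::AddMessageListener(OnMessage);
//   v8::V8::SetCaptureStackTraceForUncaughtExceptions(
//       true, 10, v8::StackTrace::kDetailed);
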
diff --git a/src/accessors.cc b/src/accessors.cc
index e41db94..ed0bbd7 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -549,8 +549,8 @@
if (frame->function() != *function) continue;
// If there is an arguments variable in the stack, we return that.
- int index = ScopeInfo<>::StackSlotIndex(frame->code(),
- Heap::arguments_symbol());
+ int index = function->shared()->scope_info()->
+ StackSlotIndex(Heap::arguments_symbol());
if (index >= 0) {
Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
diff --git a/src/api.cc b/src/api.cc
index 0f64dd4..48c64b3 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1438,6 +1438,22 @@
}
+v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
+ if (IsDeadCheck("v8::Message::GetStackTrace()")) {
+ return Local<v8::StackTrace>();
+ }
+ ENTER_V8;
+ HandleScope scope;
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::Object> stackFramesObj = GetProperty(obj, "stackFrames");
+ if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
+ i::Handle<i::JSArray> stackTrace =
+ i::Handle<i::JSArray>::cast(stackFramesObj);
+ return scope.Close(Utils::StackTraceToLocal(stackTrace));
+}
+
+
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
@@ -1583,7 +1599,9 @@
StackTraceOptions options) {
if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) return Local<StackTrace>();
ENTER_V8;
- return i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ i::Handle<i::JSArray> stackTrace =
+ i::Top::CaptureCurrentStackTrace(frame_limit, options);
+ return Utils::StackTraceToLocal(stackTrace);
}
@@ -3782,6 +3800,17 @@
}
+void V8::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ i::Top::SetCaptureStackTraceForUncaughtExceptions(
+ capture,
+ frame_limit,
+ options);
+}
+
+
void V8::SetCounterFunction(CounterLookupCallback callback) {
if (IsDeadCheck("v8::V8::SetCounterFunction()")) return;
i::StatsTable::SetCounterFunction(callback);
@@ -4184,6 +4213,12 @@
}
+void Debug::DebugBreakForCommand(ClientData* data) {
+ if (!i::V8::IsRunning()) return;
+ i::Debugger::EnqueueDebugCommand(data);
+}
+
+
static v8::Debug::MessageHandler message_handler = NULL;
static void MessageHandlerWrapper(const v8::Debug::Message& message) {
@@ -4526,6 +4561,12 @@
}
+uint64_t HeapGraphNode::GetId() const {
+ IsDeadCheck("v8::HeapGraphNode::GetId");
+ return reinterpret_cast<const i::HeapEntry*>(this)->id();
+}
+
+
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
@@ -4589,6 +4630,22 @@
}
+const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
+ IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
+ const i::HeapSnapshotsDiff* diff =
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
+}
+
+
+const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
+ IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
+ const i::HeapSnapshotsDiff* diff =
+ reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+ return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
+}
+
+
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
@@ -4604,7 +4661,7 @@
}
-const HeapGraphNode* HeapSnapshot::GetHead() const {
+const HeapGraphNode* HeapSnapshot::GetRoot() const {
IsDeadCheck("v8::HeapSnapshot::GetHead");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
@@ -4612,6 +4669,18 @@
}
+const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
+ const HeapSnapshot* snapshot) const {
+ IsDeadCheck("v8::HeapSnapshot::CompareWith");
+ i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(this));
+ i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+ return reinterpret_cast<const HeapSnapshotsDiff*>(
+ snapshot1->CompareWith(snapshot2));
+}
+
+
int HeapProfiler::GetSnapshotsCount() {
IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index f5ff43a..0dc6b77 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1801,11 +1801,119 @@
}
+static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
+ uint64_t i;
+ memcpy(&i, &d, 8);
+
+ *lo = i & 0xffffffff;
+ *hi = i >> 32;
+}
+
+// Only works for little endian floating point formats.
+// We don't support VFP on the mixed endian floating point platform.
+static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
+ // VMOV can accept an immediate of the form:
+ //
+ // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
+ //
+ // The immediate is encoded using an 8-bit quantity, comprised of two
+ // 4-bit fields. For an 8-bit immediate of the form:
+ //
+ // [abcdefgh]
+ //
+ // where a is the MSB and h is the LSB, an immediate 64-bit double can be
+ // created of the form:
+ //
+ // [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b.
+ //
+
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(d, &lo, &hi);
+
+ // The most obvious constraint is the long block of zeroes.
+ if ((lo != 0) || ((hi & 0xffff) != 0)) {
+ return false;
+ }
+
+ // The replicated b bits (bits 61:54) must be all clear or all set.
+ if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
+ return false;
+ }
+
+ // Bit 62 (B) must be the inverse of bit 61 (b).
+ if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
+ return false;
+ }
+
+ // Create the encoded immediate in the form:
+ // [00000000,0000abcd,00000000,0000efgh]
+ *encoding = (hi >> 16) & 0xf; // Low nybble.
+ *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
+ *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
+
+ return true;
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ double imm,
+ const Condition cond) {
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406B, A8-640.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
+ uint32_t enc;
+ if (FitsVMOVDoubleImmediate(imm, &enc)) {
+ // The double can be encoded in the instruction.
+ emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ } else {
+ // Synthesise the double from ARM immediates. This could be implemented
+ // using vldr from a constant pool.
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(imm, &lo, &hi);
+
+ if (lo == hi) {
+ // If the lo and hi parts of the double are equal, the literal is easier
+ // to create. This is the case with 0.0.
+ mov(ip, Operand(lo));
+ vmov(dst, ip, ip);
+ } else {
+ // Move the low part of the double into the lower of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(dst.low(), ip, cond);
+
+ // Move the high part of the double into the higher of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(dst.high(), ip, cond);
+ }
+ }
+}
+
+
+void Assembler::vmov(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Sd = Sm
+ // Instruction details available in ARM DDI 0406B, A8-642.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xB*B20 |
+ dst.code()*B12 | 0x5*B9 | B6 | src.code());
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
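
For illustration (not part of the patch): the VMOV-immediate predicate above depends only on the bit pattern of the double, so it can be exercised on the host. A standalone mirror of FitsVMOVDoubleImmediate under that assumption; FitsVmovImmediate is a hypothetical name.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Mirrors the three rejection tests in FitsVMOVDoubleImmediate above.
static bool FitsVmovImmediate(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits & 0xffffffff);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  // The long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) return false;
  // The replicated b bits must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000))
    return false;
  // B must be the inverse of b.
  return ((hi ^ (hi << 1)) & 0x40000000) != 0;
}

int main() {
  printf("1.0  -> %d\n", FitsVmovImmediate(1.0));   // 1: vmov d0, #1.0
  printf("-1.0 -> %d\n", FitsVmovImmediate(-1.0));  // 1
  printf("31.0 -> %d\n", FitsVmovImmediate(31.0));  // 1: m = 31, n = 0
  printf("0.0  -> %d\n", FitsVmovImmediate(0.0));   // 0: synthesised via ip
  return 0;
}
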
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 6a4fb23..226fb87 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -130,6 +130,20 @@
// Supporting d0 to d15, can be later extended to d31.
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ SwVfpRegister low() const {
+ SwVfpRegister reg;
+ reg.code_ = code_ * 2;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+ SwVfpRegister high() const {
+ SwVfpRegister reg;
+ reg.code_ = (code_ * 2) + 1;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
int code() const {
ASSERT(is_valid());
return code_;
@@ -932,6 +946,12 @@
const Condition cond = al);
void vmov(const DwVfpRegister dst,
+ double imm,
+ const Condition cond = al);
+ void vmov(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index fa6efcd..0b2081b 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -54,11 +54,15 @@
Condition cc,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
static void MultiplyByKnownInt(MacroAssembler* masm,
Register source,
Register destination,
@@ -1404,11 +1408,7 @@
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
- if (!rhs.is(r0)) {
- __ Swap(rhs, lhs, ip);
- }
-
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0));
exit.Jump();
@@ -4343,9 +4343,7 @@
__ bind(&powi);
// Load 1.0 into d0.
- __ mov(scratch2, Operand(0x3ff00000));
- __ mov(scratch1, Operand(0));
- __ vmov(d0, scratch1, scratch2);
+ __ vmov(d0, 1.0);
// Get the absolute untagged value of the exponent and use that for the
// calculation.
@@ -4405,9 +4403,7 @@
AVOID_NANS_AND_INFINITIES);
// Load 1.0 into d2.
- __ mov(scratch2, Operand(0x3ff00000));
- __ mov(scratch1, Operand(0));
- __ vmov(d2, scratch1, scratch2);
+ __ vmov(d2, 1.0);
// Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
__ vdiv(d0, d2, d0);
@@ -4764,6 +4760,24 @@
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+ // typeof(arg) == function).
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+ __ tst(value, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ // Check that this is an object.
+ __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
+ __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
+ cc_reg_ = ge;
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -4874,12 +4888,8 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r4, Operand(r0));
__ bind(&heapnumber_allocated);
@@ -6976,7 +6986,7 @@
// undefined >= undefined should fail.
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
}
}
@@ -6990,7 +7000,7 @@
} else {
__ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
if (cc != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
@@ -7022,14 +7032,14 @@
// value if it's a NaN.
if (cc != eq) {
// All-zero means Infinity means equal.
- __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ __ Ret(eq);
if (cc == le) {
__ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
// No fall through here.
}
@@ -7040,43 +7050,50 @@
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* lhs_not_nan,
Label* slow,
bool strict) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
Label rhs_is_smi;
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi);
// Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
// If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal (r0 is already not zero)
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ // succeed. Return non-equal.
+ // If rhs is r0 then there is already a non-zero value in it.
+ if (!rhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Lhs (r1) is a smi, rhs (r0) is a number.
+ // Lhs is a smi, rhs is a number.
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7 .
+ // Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
+ __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
__ push(lr);
// Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(r1));
+ __ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
}
@@ -7086,34 +7103,35 @@
__ bind(&rhs_is_smi);
// Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
// If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
- __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ // If lhs is r0 then there is already a non-zero value in it.
+ if (!lhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
} else {
// Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Rhs (r0) is a smi, lhs (r1) is a heap number.
+ // Rhs is a smi, lhs is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
- // Convert rhs to a double in d6 .
CpuFeatures::Scope scope(VFP3);
// Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
+ // Convert rhs to a double in d6.
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(r0));
+ __ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7167,7 +7185,7 @@
} else {
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
__ bind(&neither_is_nan);
}
@@ -7188,11 +7206,11 @@
__ cmp(rhs_mantissa, Operand(lhs_mantissa));
__ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
// Return non-zero if the numbers are unequal.
- __ mov(pc, Operand(lr), LeaveCC, ne);
+ __ Ret(ne);
__ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
// If exponents are equal then return 0.
- __ mov(pc, Operand(lr), LeaveCC, eq);
+ __ Ret(eq);
// Exponents are unequal. The only way we can return that the numbers
// are equal is if one is -0 and the other is 0. We already dealt
@@ -7202,11 +7220,11 @@
// equal.
__ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
__ mov(r0, Operand(r4), LeaveCC, ne);
- __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
+ __ Ret(ne);
// Now they are equal if and only if the lhs exponent is zero in its
// low 31 bits.
__ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ mov(pc, Operand(lr));
+ __ Ret();
} else {
// Call a native function to do a comparison between two non-NaNs.
// Call C routine that may not cause GC or other trouble.
@@ -7219,7 +7237,12 @@
// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
// If either operand is a JSObject or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
@@ -7227,20 +7250,20 @@
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
Label return_not_equal;
__ bind(&return_not_equal);
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -7259,12 +7282,17 @@
// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
__ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ cmp(r2, r3);
__ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
@@ -7272,13 +7300,13 @@
// for that.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
} else {
- __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
}
__ jmp(both_loaded_as_doubles);
}
@@ -7286,9 +7314,14 @@
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
Label* possible_strings,
Label* not_both_strings) {
- // r2 is object type of r0.
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // r2 is object type of rhs.
// Ensure that no non-strings have the symbol bit set.
Label object_test;
ASSERT(kSymbolTag != 0);
@@ -7296,31 +7329,31 @@
__ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, possible_strings);
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, possible_strings);
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
- __ mov(r0, Operand(1)); // Non-zero indicates not equal.
- __ mov(pc, Operand(lr)); // Return.
+ __ mov(r0, Operand(NOT_EQUAL));
+ __ Ret();
__ bind(&object_test);
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
+ // If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
// equal to undefined.
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ and_(r0, r2, Operand(r3));
__ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
__ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ mov(pc, Operand(lr)); // Return.
+ __ Ret();
}
@@ -7442,10 +7475,13 @@
}
-// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -7460,7 +7496,7 @@
// be strictly equal if the other is a HeapNumber.
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, r0, Operand(r1));
+ __ and_(r2, lhs_, Operand(rhs_));
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, ¬_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -7472,7 +7508,7 @@
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -7489,7 +7525,7 @@
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ mov(pc, Operand(lr));
+ __ Ret();
__ bind(&nan);
// If one of the sides was a NaN then the v flag is set. Load r0 with
@@ -7500,7 +7536,7 @@
} else {
__ mov(r0, Operand(LESS));
}
- __ mov(pc, Operand(lr));
+ __ Ret();
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
@@ -7512,11 +7548,11 @@
__ bind(¬_smis);
// At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in r0 and r1.
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
if (strict_) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm);
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
}
Label check_for_symbols;
@@ -7524,8 +7560,10 @@
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of r0. Never falls through.
+ // In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
@@ -7536,20 +7574,20 @@
if (cc_ == eq && !strict_) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of r0 on entry.
- EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
+ // Assumes that r2 is the type of rhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
__ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- r1,
- r0,
+ lhs_,
+ rhs_,
r2,
r3,
r4,
@@ -7558,7 +7596,7 @@
__ bind(&slow);
- __ Push(r1, r0);
+ __ Push(lhs_, rhs_);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
if (cc_ == eq) {
@@ -10059,6 +10097,9 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
@@ -10075,6 +10116,9 @@
default: cc_name = "UnknownCondition"; break;
}
+ const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+ const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
const char* strict_name = "";
if (strict_ && (cc_ == eq || cc_ == ne)) {
strict_name = "_STRICT";
@@ -10091,8 +10135,10 @@
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
+ "CompareStub_%s%s%s%s%s%s",
cc_name,
+ lhs_name,
+ rhs_name,
strict_name,
never_nan_nan_name,
include_number_compare_name);
@@ -10104,8 +10150,11 @@
// Encode the stub parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
// condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
+ ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+ | RegisterField::encode(lhs_.is(r0))
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
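
For illustration (not part of the patch): CompareStub code objects are cached by their minor key, so once the stub is parameterized on which of r0/r1 holds lhs, that choice has to be folded into the key; that is why the condition field's headroom drops from 1 << 13 to 1 << 12 above. A schematic of the packing; the field positions here are illustrative, not V8's actual BitField layout.

#include <stdint.h>

static uint32_t CompareStubMinorKey(unsigned cc_top_bits,  // cc_ >> 28
                                    bool lhs_is_r0,
                                    bool strict,
                                    bool never_nan_nan,
                                    bool include_number_compare) {
  return (cc_top_bits << 4)
       | (static_cast<uint32_t>(lhs_is_r0) << 3)  // new register bit
       | (static_cast<uint32_t>(strict) << 2)
       | (static_cast<uint32_t>(never_nan_nan) << 1)
       | static_cast<uint32_t>(include_number_compare);
}

Because each distinct key maps to its own cached stub, callers no longer need the old __ Swap(rhs, lhs, ip) shuffle into a canonical register assignment before calling the stub.
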
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 855723d..83685d8 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -475,6 +475,7 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 002e4c1..3df7b4e 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -37,6 +37,26 @@
namespace v8i = v8::internal;
+double Instr::DoubleImmedVmov() const {
+ // Reconstruct a double from the immediate encoded in the vmov instruction.
+ //
+ // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
+ // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
+ // 00000000,00000000,00000000,00000000]
+ //
+ // where B = ~b. Only the high 16 bits are affected.
+ uint64_t high16;
+ high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
+
+ uint64_t imm = high16 << 48;
+ double d;
+ memcpy(&d, &imm, 8);
+ return d;
+}
+
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
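
For illustration (not part of the patch): Instr::DoubleImmedVmov above is the decoder matching the encoder added in assembler-arm.cc. A host-side sketch of the same expansion, starting from the raw 8-bit immediate abcdefgh instead of the instruction word; ExpandVmovImm8 is a hypothetical name.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Expand imm8 = abcdefgh to [a, ~b, b x 8, cdefgh] followed by 48 zero bits.
static double ExpandVmovImm8(uint8_t imm8) {
  uint64_t a = (imm8 >> 7) & 1;
  uint64_t b = (imm8 >> 6) & 1;
  uint64_t cdefgh = imm8 & 0x3f;
  uint64_t high16 =
      (a << 15) | ((b ^ 1) << 14) | (b ? 0x3fc0 : 0) | cdefgh;
  uint64_t bits = high16 << 48;
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  printf("%g\n", ExpandVmovImm8(0x70));  // 1.0
  printf("%g\n", ExpandVmovImm8(0xf0));  // -1.0
  printf("%g\n", ExpandVmovImm8(0x3f));  // 31.0
  return 0;
}

Feeding these values back through the encoder should reproduce the same imm8, which makes the pair easy to cross-check in a unit test.
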
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index fa9adbd..2ac9a41 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -333,6 +333,9 @@
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
+ // Decoding the double immediate in the vmov instruction.
+ double DoubleImmedVmov() const;
+
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instr.
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index a52417b..37401ed 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -412,6 +412,12 @@
PrintCondition(instr);
return 4;
}
+ case 'd': { // 'd: vmov double immediate.
+ double d = instr->DoubleImmedVmov();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%g", d);
+ return 1;
+ }
case 'f': { // 'f: bitfield instructions - v7 and above.
uint32_t lsbit = instr->Bits(11, 7);
uint32_t width = instr->Bits(20, 16) + 1;
@@ -1052,7 +1058,7 @@
if (instr->SzField() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
- Unknown(instr); // Not used by V8.
+ Format(instr, "vmov.f32'cond 'Sd, 'Sm");
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -1066,6 +1072,12 @@
DecodeVCMP(instr);
} else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
+ } else if (instr->Opc3Field() == 0x0) {
+ if (instr->SzField() == 0x1) {
+ Format(instr, "vmov.f64'cond 'Dd, 'd");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
} else {
Unknown(instr); // Not used by V8.
}
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 080cb83..7d90ed9 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -822,8 +822,7 @@
// the smi vs. smi case to be handled before it is called.
Label slow_case;
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
- __ mov(r2, r1);
- __ orr(r2, r2, r0);
+ __ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
__ cmp(r1, r0);
@@ -832,9 +831,9 @@
__ b(clause->body_target()->entry_label());
__ bind(&slow_case);
- CompareStub stub(eq, true);
+ CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
- __ tst(r0, r0);
+ __ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
@@ -1909,6 +1908,25 @@
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ BranchOnSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, if_true);
+ __ b(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2161,12 +2179,8 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r4, Operand(r0));
__ bind(&heapnumber_allocated);
@@ -3092,7 +3106,7 @@
__ jmp(if_false);
__ bind(&slow_case);
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0));
__ b(cc, if_true);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 2896cc9..f251b31 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -873,88 +873,6 @@
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Get the map of the current object.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- cmp(scratch, Operand(Handle<Map>(object->map())));
-
- // Branch on the result of the map check.
- b(ne, miss);
-
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- // Restore scratch register to be the map of the object. In the
- // new space case below, we load the prototype from the map in
- // the scratch register.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // from now the object is in holder_reg
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- mov(reg, Operand(Handle<JSObject>(prototype)));
- }
-
- if (save_at_depth == depth) {
- str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- cmp(scratch, Operand(Handle<Map>(object->map())));
- b(ne, miss);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index f1f7de7..156e132 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -316,24 +316,6 @@
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [sp].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, whereas both scratch registers are clobbered.
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index e8910f4..c67c7aa 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -799,7 +799,6 @@
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f09ce00..3345e45 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2281,7 +2281,7 @@
if (instr->SzField() == 0x1) {
set_d_register_from_double(vd, get_double_from_d_register(vm));
} else {
- UNREACHABLE(); // Not used by V8.
+ set_s_register_from_float(vd, get_float_from_s_register(vm));
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2298,6 +2298,13 @@
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
set_d_register_from_double(vd, dd_value);
+ } else if (instr->Opc3Field() == 0x0) {
+ // vmov immediate.
+ if (instr->SzField() == 0x1) {
+ set_d_register_from_double(vd, instr->DoubleImmedVmov());
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
} else {
UNREACHABLE(); // Not used by V8.
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 0e649cc..ff3007c 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -83,6 +83,119 @@
}
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so the code at
+// miss_label must always perform a complete backup property check.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ b(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ cmp(map, tmp);
+ __ b(ne, miss_label);
+
+ // Restore the temporarily used register.
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+ // If the names in the slots probed for the hash value (probes 1 to
+ // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+ // unused (its name is the undefined value), the hash table is guaranteed
+ // not to contain the property. This holds even if some slots hold
+ // deleted properties (their names are the null value).
+ for (int i = 0; i < kProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch1;
+ // Capacity is smi 2^n.
+ __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
+ __ sub(index, index, Operand(1));
+ __ and_(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch1;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+ __ add(tmp, properties, Operand(index, LSL, 1));
+ __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ cmp(entity_name, tmp);
+ if (i != kProbes - 1) {
+ __ b(eq, &done);
+
+ // Stop if we found the property.
+ __ cmp(entity_name, Operand(Handle<String>(name)));
+ __ b(eq, miss_label);
+
+ // Check if the entry name is not a symbol.
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ tst(entity_name, Operand(kIsSymbolMask));
+ __ b(eq, miss_label);
+
+ // Restore the properties.
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ } else {
+ // Give up probing if we still haven't found the undefined value.
+ __ b(ne, miss_label);
+ }
+ }
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1);
+}
+
+
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -517,6 +630,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -532,6 +646,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -543,6 +658,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -555,6 +671,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -596,7 +713,7 @@
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -612,7 +729,7 @@
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -648,12 +765,13 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
// Call a runtime function to load the interceptor property.
@@ -738,36 +856,134 @@
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
- save_at_depth, miss);
+ Label* miss) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ str(reg, MemOperand(sp));
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // from now the object is in holder_reg
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // Get the map of the current object.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+
+ // Branch on the result of the map check.
+ __ b(ne, miss);
+
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ // Restore scratch register to be the map of the object. In the
+ // new space case below, we load the prototype from the map in
+ // the scratch register.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // from now the object is in holder_reg
+ if (Heap::InNewSpace(prototype)) {
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ mov(reg, Operand(Handle<JSObject>(prototype)));
+ }
+ }
+
+ if (save_at_depth == depth) {
+ __ str(reg, MemOperand(sp));
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+ __ b(ne, miss);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Perform security check for access to the global object and return
+ // the holder register.
+ ASSERT(current == holder);
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
+ current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
+ GlobalObject::cast(current),
name,
- scratch,
+ scratch1,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
- return result;
+ return reg;
}
}
- object = JSObject::cast(object->GetPrototype());
+ current = JSObject::cast(current->GetPrototype());
}
// Return the register containing the holder.
- return result;
+ return reg;
}
@@ -776,6 +992,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -785,7 +1002,8 @@
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
}
@@ -796,6 +1014,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -805,7 +1024,8 @@
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
@@ -819,6 +1039,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -829,7 +1050,8 @@
// Check that the maps haven't changed.
Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // Receiver.
@@ -854,6 +1076,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -881,7 +1104,8 @@
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -930,6 +1154,7 @@
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -975,7 +1200,8 @@
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -1053,7 +1279,7 @@
__ b(eq, &miss);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss);
+ Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
GenerateCallFunction(masm(), object, arguments(), &miss);
@@ -1098,7 +1324,7 @@
__ b(eq, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1149,7 +1375,7 @@
__ b(eq, &miss);
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1246,7 +1472,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name,
+ CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -1270,7 +1496,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
@@ -1290,7 +1516,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
}
@@ -1313,7 +1539,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
- r1, name, &miss);
+ r1, r4, name, &miss);
}
break;
}
@@ -1372,6 +1598,7 @@
r1,
r3,
r4,
+ r0,
&miss);
// Move returned value, the function to call, to r1.
@@ -1418,7 +1645,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, r0, holder, r3, r1, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1642,7 +1869,7 @@
__ b(eq, &miss);
// Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, name, &miss);
+ CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1679,7 +1906,7 @@
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
+ GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1700,7 +1927,7 @@
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+ bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1723,7 +1950,7 @@
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
+ GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1751,6 +1978,7 @@
r2,
r3,
r1,
+ r4,
name,
&miss);
__ bind(&miss);
@@ -1782,7 +2010,7 @@
}
// Check that the map of the global has not changed.
- CheckPrototypes(object, r0, holder, r3, r4, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1823,7 +2051,7 @@
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
+ GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1847,7 +2075,7 @@
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+ bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1873,7 +2101,7 @@
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
+ GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1905,6 +2133,7 @@
r0,
r2,
r3,
+ r4,
name,
&miss);
__ bind(&miss);
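
The recurring edit through these ARM stubs threads a third scratch register (usually r4) into CheckPrototypes and the GenerateLoad* helpers. Conceptually, the code these stubs emit is the prototype-chain walk sketched below; the types and names are simplified stand-ins for illustration, not V8's actual classes.

#include <cstdio>

// Simplified stand-ins for JSObject/GlobalObject (hypothetical types).
struct Obj {
  Obj* prototype;  // next object on the prototype chain
  bool is_global;  // global objects need an extra property-cell check
};

// Walk from 'object' up to (but excluding) 'holder' -- the shape of
// the loop compiled by CheckPrototypes. Each global object on the way
// gets the equivalent of GenerateCheckPropertyCell.
bool CheckChain(Obj* object, Obj* holder) {
  for (Obj* current = object; current != holder;
       current = current->prototype) {
    if (current == NULL) return false;  // holder not on the chain
    if (current->is_global) {
      std::printf("check that the global's property cell is empty\n");
    }
  }
  return true;  // holder reached with all checks passed
}

int main() {
  Obj holder = {NULL, false};
  Obj global = {&holder, true};
  Obj receiver = {&global, false};
  std::printf("%s\n", CheckChain(&receiver, &holder) ? "hit" : "miss");
}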
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index bbd69ec..e1d4489 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -812,6 +812,9 @@
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
+ initial_map->set_scavenger(
+ Heap::GetScavenger(initial_map->instance_type(),
+ initial_map->instance_size()));
}
{ // -- J S O N
diff --git a/src/builtins.cc b/src/builtins.cc
index ad52ea1..3a0393e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1475,7 +1475,7 @@
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
AlwaysAllocateScope __scope__;
- code = Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+ code = Heap::CreateCode(desc, flags, masm.CodeObject());
if (code->IsFailure()) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 9d5969b..78062b4 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -102,8 +102,7 @@
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object =
- Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+ Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
if (has_custom_cache()) {
@@ -140,8 +139,7 @@
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Object* new_object =
- Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+ Object* new_object = Heap::CreateCode(desc, flags, masm.CodeObject());
if (new_object->IsFailure()) return new_object;
code = Code::cast(new_object);
RecordCodeGeneration(code, &masm);
diff --git a/src/codegen.cc b/src/codegen.cc
index 8864c95..84b73a4 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -162,9 +162,7 @@
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
- ZoneScopeInfo sinfo(info->scope());
- Handle<Code> code =
- Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
+ Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
#ifdef ENABLE_DISASSEMBLER
bool print_code = Bootstrapper::IsActive()
diff --git a/src/codegen.h b/src/codegen.h
index 783bef0..7a4b858 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -120,6 +120,7 @@
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
+ F(IsSpecObject, 1, 1) \
F(StringAdd, 2, 1) \
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
@@ -180,7 +181,6 @@
CodeGenerator* previous_;
};
-
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
@@ -461,11 +461,15 @@
CompareStub(Condition cc,
bool strict,
NaNInformation nan_info = kBothCouldBeNaN,
- bool include_number_compare = true) :
+ bool include_number_compare = true,
+ Register lhs = no_reg,
+ Register rhs = no_reg) :
cc_(cc),
strict_(strict),
never_nan_nan_(nan_info == kCantBothBeNaN),
include_number_compare_(include_number_compare),
+ lhs_(lhs),
+ rhs_(rhs),
name_(NULL) { }
void Generate(MacroAssembler* masm);
@@ -483,12 +487,19 @@
// comparison code is used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
+ // Register holding the left hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register lhs_;
+ // Register holding the right hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register rhs_;
- // Encoding of the minor key CCCCCCCCCCCCCCNS.
+ // Encoding of the minor key CCCCCCCCCCCCRCNS.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class ConditionField: public BitField<int, 3, 13> {};
+ class RegisterField: public BitField<bool, 3, 1> {};
+ class ConditionField: public BitField<int, 4, 12> {};
Major MajorKey() { return Compare; }
@@ -507,11 +518,17 @@
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s), "
- "(never_nan_nan %s), (number_compare %s)\n",
+ "(never_nan_nan %s), (number_compare %s) ",
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
include_number_compare_ ? "included" : "not included");
+
+ if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
+ PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
+ } else {
+ PrintF("\n");
+ }
}
#endif
};
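
The new RegisterField takes one bit away from ConditionField, turning the minor key layout from CCCCCCCCCCCCCCNS into CCCCCCCCCCCCRCNS. A minimal, self-contained sketch of the BitField packing idiom used here (simplified from V8's template, for illustration only):

#include <cstdint>
#include <cassert>

// Packs a value of 'size' bits at bit offset 'shift' of a 32-bit key.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Layout CCCCCCCCCCCCRCNS, low bits first: strict (S), never-NaN (N),
// number-compare (C), register choice (R), then 12 condition bits.
typedef BitField<bool, 0, 1> StrictField;
typedef BitField<bool, 1, 1> NeverNanNanField;
typedef BitField<bool, 2, 1> IncludeNumberCompareField;
typedef BitField<bool, 3, 1> RegisterField;
typedef BitField<int, 4, 12> ConditionField;

int main() {
  uint32_t key = StrictField::encode(true) |
                 RegisterField::encode(true) |
                 ConditionField::encode(7);
  assert(StrictField::decode(key));
  assert(ConditionField::decode(key) == 7);
}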
diff --git a/src/compiler.cc b/src/compiler.cc
index ebb9743..d87d9da 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -40,6 +40,7 @@
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
+#include "scopeinfo.h"
namespace v8 {
namespace internal {
@@ -156,7 +157,12 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
Handle<Context> context = Handle<Context>::null();
- return MakeCode(context, info);
+ Handle<Code> code = MakeCode(context, info);
+ if (!info->shared_info().is_null()) {
+ info->shared_info()->set_scope_info(
+ *SerializedScopeInfo::Create(info->scope()));
+ }
+ return code;
}
#endif
@@ -252,9 +258,11 @@
// Allocate function.
Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(lit->name(),
- lit->materialized_literal_count(),
- code);
+ Factory::NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ code,
+ SerializedScopeInfo::Create(info.scope()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -275,9 +283,6 @@
}
-static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
-
-
Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
@@ -306,9 +311,7 @@
// No cache entry found. Do pre-parsing and compile the script.
ScriptDataImpl* pre_data = input_pre_data;
if (pre_data == NULL && source_length >= FLAG_min_preparse_length) {
- Access<SafeStringInputBuffer> buf(&safe_string_input_buffer);
- buf->Reset(source.location());
- pre_data = PreParse(source, buf.value(), extension);
+ pre_data = PreParse(source, NULL, extension);
}
// Create a script object describing the script to be compiled.
@@ -445,7 +448,12 @@
info->script(),
code);
- // Update the shared function info with the compiled code.
+ // Update the shared function info with the compiled code and the scope info.
+ // Please note that the order of the shared function info initialization
+ // is important: set_scope_info might trigger a GC, causing the ASSERT
+ // below to be invalid if the code was flushed. By setting the code
+ // object last we avoid this.
+ shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code);
// Set the expected number of properties for instances.
@@ -481,6 +489,8 @@
bool allow_lazy = literal->AllowsLazyCompilation() &&
!LiveEditFunctionTracker::IsActive();
+ Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+
// Generate code
Handle<Code> code;
if (FLAG_lazy && allow_lazy) {
@@ -562,13 +572,15 @@
literal->start_position(),
script,
code);
+ scope_info = SerializedScopeInfo::Create(info.scope());
}
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
- code);
+ code,
+ scope_info);
SetFunctionInfo(result, literal, false, script);
// Set the expected number of properties for instances and return
diff --git a/src/contexts.cc b/src/contexts.cc
index 19920d2..723354f 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -120,9 +120,10 @@
// we have context-local slots
// check non-parameter locals in context
- Handle<Code> code(context->closure()->code());
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
Variable::Mode mode;
- int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+ int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
// slot found
@@ -150,13 +151,11 @@
}
// check parameter locals in context
- int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+ int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) {
// slot found.
int index =
- ScopeInfo<>::ContextSlotIndex(*code,
- Heap::arguments_shadow_symbol(),
- NULL);
+ scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
ASSERT(index >= 0); // arguments must exist and be in the heap context
Handle<JSObject> arguments(JSObject::cast(context->get(index)));
ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
@@ -170,7 +169,7 @@
// check intermediate context (holding only the function name variable)
if (follow_context_chain) {
- int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+ int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
// slot found
if (FLAG_trace_contexts) {
@@ -216,18 +215,19 @@
ASSERT(context->is_function_context());
// Check non-parameter locals.
- Handle<Code> code(context->closure()->code());
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
Variable::Mode mode;
- int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode);
+ int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) return false;
// Check parameter locals.
- int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
+ int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) return false;
// Check context only holding the function name variable.
- index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name);
+ index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
context = Context::cast(context->closure()->context());
}
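
Both lookup paths above now read slot indices from the closure's SerializedScopeInfo instead of decoding them out of the Code object. The contract is the one sketched below with hypothetical simplified types: each lookup returns a slot index, or a negative value when the name is absent.

#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-in for SerializedScopeInfo's lookup interface.
struct ScopeInfoSketch {
  std::map<std::string, int> context_slots;
  std::map<std::string, int> parameters;

  int ContextSlotIndex(const std::string& name) const {
    std::map<std::string, int>::const_iterator it =
        context_slots.find(name);
    return it == context_slots.end() ? -1 : it->second;
  }
  int ParameterIndex(const std::string& name) const {
    std::map<std::string, int>::const_iterator it = parameters.find(name);
    return it == parameters.end() ? -1 : it->second;
  }
};

int main() {
  ScopeInfoSketch info;
  info.context_slots["x"] = 5;
  std::printf("%d %d\n", info.ContextSlotIndex("x"),
              info.ParameterIndex("y"));  // prints "5 -1"
}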
diff --git a/src/debug.cc b/src/debug.cc
index b8e0252..7288135 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -759,7 +759,7 @@
if (caught_exception) {
Handle<Object> message = MessageHandler::MakeMessageObject(
"error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
- Handle<String>());
+ Handle<String>(), Handle<JSArray>());
MessageHandler::ReportMessage(NULL, message);
return false;
}
@@ -1882,6 +1882,7 @@
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
+LockingCommandMessageQueue Debugger::event_command_queue_(kQueueInitialSize);
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -2207,39 +2208,75 @@
event_data,
auto_continue);
}
- // Notify registered debug event listener. This can be either a C or a
- // JavaScript function.
- if (!event_listener_.is_null()) {
- if (event_listener_->IsProxy()) {
- // C debug event listener.
- Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
- v8::Debug::EventCallback2 callback =
- FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
- EventDetailsImpl event_details(
- event,
- Handle<JSObject>::cast(exec_state),
- event_data,
- event_listener_data_);
- callback(event_details);
- } else {
- // JavaScript debug event listener.
- ASSERT(event_listener_->IsJSFunction());
- Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
-
- // Invoke the JavaScript debug event listener.
- const int argc = 4;
- Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
- exec_state.location(),
- Handle<Object>::cast(event_data).location(),
- event_listener_data_.location() };
- Handle<Object> result = Execution::TryCall(fun, Top::global(),
- argc, argv, &caught_exception);
- // Silently ignore exceptions from debug event listeners.
+ // Notify the registered debug event listener. This can be either a C
+ // or a JavaScript function. Don't call the event listener for v8::Break
+ // here if it is only a debug command -- those are processed later.
+ if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
+ CallEventCallback(event, exec_state, event_data, NULL);
+ }
+ // Process pending debug commands.
+ if (event == v8::Break) {
+ while (!event_command_queue_.IsEmpty()) {
+ CommandMessage command = event_command_queue_.Get();
+ if (!event_listener_.is_null()) {
+ CallEventCallback(v8::BreakForCommand,
+ exec_state,
+ event_data,
+ command.client_data());
+ }
+ command.Dispose();
}
}
}
+void Debugger::CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data) {
+ if (event_listener_->IsProxy()) {
+ CallCEventCallback(event, exec_state, event_data, client_data);
+ } else {
+ CallJSEventCallback(event, exec_state, event_data);
+ }
+}
+
+
+void Debugger::CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data) {
+ Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+ v8::Debug::EventCallback2 callback =
+ FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+ EventDetailsImpl event_details(
+ event,
+ Handle<JSObject>::cast(exec_state),
+ Handle<JSObject>::cast(event_data),
+ event_listener_data_,
+ client_data);
+ callback(event_details);
+}
+
+
+void Debugger::CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data) {
+ ASSERT(event_listener_->IsJSFunction());
+ Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
+
+ // Invoke the JavaScript debug event listener.
+ const int argc = 4;
+ Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+ exec_state.location(),
+ Handle<Object>::cast(event_data).location(),
+ event_listener_data_.location() };
+ bool caught_exception = false;
+ Execution::TryCall(fun, Top::global(), argc, argv, &caught_exception);
+ // Silently ignore exceptions from debug event listeners.
+}
+
+
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
EnterDebugger debugger;
@@ -2273,6 +2310,7 @@
bool sendEventMessage = false;
switch (event) {
case v8::Break:
+ case v8::BreakForCommand:
sendEventMessage = !auto_continue;
break;
case v8::Exception:
@@ -2560,6 +2598,17 @@
}
+void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
+ CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
+ event_command_queue_.Put(message);
+
+ // Set the debug command break flag to have the command processed.
+ if (!Debug::InDebugger()) {
+ StackGuard::DebugCommand();
+ }
+}
+
+
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
@@ -2761,11 +2810,13 @@
EventDetailsImpl::EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
- Handle<Object> callback_data)
+ Handle<Object> callback_data,
+ v8::Debug::ClientData* client_data)
: event_(event),
exec_state_(exec_state),
event_data_(event_data),
- callback_data_(callback_data) {}
+ callback_data_(callback_data),
+ client_data_(client_data) {}
DebugEvent EventDetailsImpl::GetEvent() const {
@@ -2793,6 +2844,11 @@
}
+v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
+ return client_data_;
+}
+
+
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}
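
Taken together, these changes give embedders a way to run a command on the VM thread: a non-VM thread calls Debug::DebugBreakForCommand(data), EnqueueDebugCommand queues the data and requests a break, and when the VM stops the registered EventCallback2 fires with event == v8::BreakForCommand and GetClientData() returning the same pointer. A hedged usage sketch from the embedder's side (registration call elided; this assumes the v8-debug.h interface shown in this patch):

#include <v8-debug.h>
#include <cstdio>

// Hypothetical payload; the debugger takes ownership and deletes it.
struct MyData : public v8::Debug::ClientData {
  explicit MyData(int id) : command_id(id) {}
  int command_id;
};

static void DebugEventHandler(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::BreakForCommand) {
    MyData* data = static_cast<MyData*>(details.GetClientData());
    std::printf("running client command %d on the VM thread\n",
                data->command_id);
  }
}

// Setup (on the VM thread): register DebugEventHandler as the C debug
// event listener. Then, from any thread:
//   v8::Debug::DebugBreakForCommand(new MyData(42));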
diff --git a/src/debug.h b/src/debug.h
index fb92692..7bb4a42 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -566,18 +566,21 @@
EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
- Handle<Object> callback_data);
+ Handle<Object> callback_data,
+ v8::Debug::ClientData* client_data);
virtual DebugEvent GetEvent() const;
virtual v8::Handle<v8::Object> GetExecutionState() const;
virtual v8::Handle<v8::Object> GetEventData() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Handle<v8::Value> GetCallbackData() const;
+ virtual v8::Debug::ClientData* GetClientData() const;
private:
DebugEvent event_; // Debug event causing the break.
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
- Handle<Object> callback_data_; // User data passed with the callback when
- // it was registered.
+ Handle<JSObject> exec_state_; // Current execution state.
+ Handle<JSObject> event_data_; // Data associated with the event.
+ Handle<Object> callback_data_; // User data passed with the callback
+ // when it was registered.
+ v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
};
@@ -706,6 +709,9 @@
// Check whether there are commands in the command queue.
static bool HasCommands();
+ // Enqueue a debugger command to the command queue for event listeners.
+ static void EnqueueDebugCommand(v8::Debug::ClientData* client_data = NULL);
+
static Handle<Object> Call(Handle<JSFunction> fun,
Handle<Object> data,
bool* pending_exception);
@@ -753,6 +759,17 @@
static bool IsDebuggerActive();
private:
+ static void CallEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ static void CallCEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data,
+ v8::Debug::ClientData* client_data);
+ static void CallJSEventCallback(v8::DebugEvent event,
+ Handle<Object> exec_state,
+ Handle<Object> event_data);
static void ListenersChanged();
static Mutex* debugger_access_; // Mutex guarding debugger variables.
@@ -775,6 +792,8 @@
static LockingCommandMessageQueue command_queue_;
static Semaphore* command_received_; // Signaled for each command received.
+ static LockingCommandMessageQueue event_command_queue_;
+
friend class EnterDebugger;
};
diff --git a/src/factory.cc b/src/factory.cc
index 39e881a..d653383 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -277,6 +277,8 @@
copy->set_inobject_properties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(copy->instance_size() + instance_size_delta);
+ copy->set_scavenger(Heap::GetScavenger(copy->instance_type(),
+ copy->instance_size()));
return copy;
}
@@ -541,10 +543,9 @@
Handle<Code> Factory::NewCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_ref) {
- CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code);
+ CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code);
}
@@ -680,9 +681,13 @@
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, Handle<Code> code) {
+ Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
shared->set_code(*code);
+ shared->set_scope_info(*scope_info);
int literals_array_size = number_of_literals;
// If the function contains object, regexp or array literals,
// allocate extra space for a literals array prefix containing the
diff --git a/src/factory.h b/src/factory.h
index 56deda5..2251112 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -34,9 +34,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class ZoneScopeInfo;
-
// Interface for handle based allocation.
class Factory : public AllStatic {
@@ -241,7 +238,6 @@
PretenureFlag pretenure = TENURED);
static Handle<Code> NewCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference);
@@ -352,7 +348,10 @@
}
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name, int number_of_literals, Handle<Code> code);
+ Handle<String> name,
+ int number_of_literals,
+ Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info);
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
static Handle<NumberDictionary> DictionaryAtNumberPut(
diff --git a/src/frames.cc b/src/frames.cc
index 67a20d3..bdd5100 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -532,8 +532,11 @@
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
+ Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
+ scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
Handle<Script> script(Script::cast(script_obj));
@@ -561,7 +564,7 @@
// Get scope information for nicer output, if possible. If code is
// NULL, or doesn't contain scope info, info will return 0 for the
// number of parameters, stack slots, or context slots.
- ScopeInfo<PreallocatedStorage> info(code);
+ ScopeInfo<PreallocatedStorage> info(*scope_info);
// Print the parameters.
int parameters_count = ComputeParametersCount();
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index b64a179..8a8b39b 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -857,6 +857,8 @@
EmitIsNonNegativeSmi(expr->arguments());
} else if (strcmp("_IsObject", *name->ToCString()) == 0) {
EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
+ EmitIsSpecObject(expr->arguments());
} else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
EmitIsUndetectableObject(expr->arguments());
} else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 3d56232..b056cee 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -402,6 +402,7 @@
void EmitIsSmi(ZoneList<Expression*>* arguments);
void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
void EmitIsObject(ZoneList<Expression*>* arguments);
+ void EmitIsSpecObject(ZoneList<Expression*>* arguments);
void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
void EmitIsFunction(ZoneList<Expression*>* arguments);
void EmitIsArray(ZoneList<Expression*>* arguments);
diff --git a/src/globals.h b/src/globals.h
index aea8858..6f985eb 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -326,6 +326,7 @@
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+class SerializedScopeInfo;
class Script;
class Slot;
class Smi;
@@ -345,7 +346,6 @@
class TickSample;
class VirtualMemory;
class Mutex;
-class ZoneScopeInfo;
typedef bool (*WeakSlotCallback)(Object** pointer);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 73b9748..92ded7b 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -364,6 +364,7 @@
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
+ snapshots_->SnapshotGenerationFinished();
return result;
}
@@ -391,6 +392,12 @@
}
+void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
+ ASSERT(singleton_ != NULL);
+ singleton_->snapshots_->ObjectMoveEvent(from, to);
+}
+
+
const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index b593b99..dac488e 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -38,7 +38,15 @@
class HeapSnapshot;
class HeapSnapshotsCollection;
-#endif
+#define HEAP_PROFILE(Call) \
+ do { \
+ if (v8::internal::HeapProfiler::is_profiling()) { \
+ v8::internal::HeapProfiler::Call; \
+ } \
+ } while (false)
+#else
+#define HEAP_PROFILE(Call) ((void) 0)
+#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@@ -54,6 +62,12 @@
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
+ static void ObjectMoveEvent(Address from, Address to);
+
+ static INLINE(bool is_profiling()) {
+ return singleton_ != NULL && singleton_->snapshots_->is_tracking_objects();
+ }
+
// Obsolete interface.
// Write a single heap sample to the log file.
static void WriteSample();
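
The HEAP_PROFILE macro added above follows the classic do { ... } while (false) idiom: the guarded call behaves like a single statement (so it is safe in an unbraced if/else) and compiles away entirely when profiling support is not built in. A standalone sketch of the same pattern, with stand-in names:

#include <cstdio>

#define ENABLE_PROFILING 1

#if ENABLE_PROFILING
static bool is_profiling() { return true; }  // stand-in for the check
#define PROFILE(Call)       \
  do {                      \
    if (is_profiling()) {   \
      Call;                 \
    }                       \
  } while (false)
#else
#define PROFILE(Call) ((void) 0)
#endif

int main() {
  if (1)
    PROFILE(std::printf("object moved\n"));  // no dangling-else pitfall
  else
    std::printf("never reached\n");
}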
diff --git a/src/heap.cc b/src/heap.cc
index 1b62589..dc41027 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -799,34 +799,34 @@
};
-// A queue of pointers and maps of to-be-promoted objects during a
-// scavenge collection.
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
void Initialize(Address start_address) {
- front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
+ front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
}
bool is_empty() { return front_ <= rear_; }
- void insert(HeapObject* object, Map* map) {
- *(--rear_) = object;
- *(--rear_) = map;
+ void insert(HeapObject* target, int size) {
+ *(--rear_) = reinterpret_cast<intptr_t>(target);
+ *(--rear_) = size;
// Assert no overflow into live objects.
ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
}
- void remove(HeapObject** object, Map** map) {
- *object = *(--front_);
- *map = Map::cast(*(--front_));
+ void remove(HeapObject** target, int* size) {
+ *target = reinterpret_cast<HeapObject*>(*(--front_));
+ *size = static_cast<int>(*(--front_));
// Assert no underflow.
ASSERT(front_ >= rear_);
}
private:
// The front of the queue is higher in memory than the rear.
- HeapObject** front_;
- HeapObject** rear_;
+ intptr_t* front_;
+ intptr_t* rear_;
};
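
Storing the size next to the target pointer means the later drain loop never has to re-read a map just to find out how many bytes to scan. A minimal standalone sketch of the reworked queue (same downward-growing layout, simplified types):

#include <cassert>
#include <cstdint>

class PromotionQueueSketch {
 public:
  // The queue occupies memory below 'high_end' and grows downward.
  void Initialize(intptr_t* high_end) { front_ = rear_ = high_end; }
  bool is_empty() const { return front_ <= rear_; }
  void insert(void* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
  }
  void remove(void** target, int* size) {
    *target = reinterpret_cast<void*>(*(--front_));
    *size = static_cast<int>(*(--front_));
  }
 private:
  intptr_t* front_;  // higher in memory than rear_
  intptr_t* rear_;
};

int main() {
  intptr_t buffer[16];
  PromotionQueueSketch q;
  q.Initialize(buffer + 16);
  int x = 0;
  const int kObjectSize = 16;
  q.insert(&x, kObjectSize);
  void* target;
  int size;
  q.remove(&target, &size);
  assert(target == &x && size == kObjectSize && q.is_empty());
}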
@@ -1041,31 +1041,26 @@
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- object->Iterate(scavenge_visitor);
- new_space_front += object->Size();
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, scavenge_visitor);
+ new_space_front += size;
}
// Promote and process all the to-be-promoted objects.
while (!promotion_queue.is_empty()) {
- HeapObject* source;
- Map* map;
- promotion_queue.remove(&source, &map);
- // Copy the from-space object to its new location (given by the
- // forwarding address) and fix its map.
- HeapObject* target = source->map_word().ToForwardingAddress();
- int size = source->SizeFromMap(map);
- CopyBlock(target->address(), source->address(), size);
- target->set_map(map);
+ HeapObject* target;
+ int size;
+ promotion_queue.remove(&target, &size);
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Update NewSpace stats if necessary.
- RecordCopiedObject(target);
-#endif
- // Visit the newly copied object for pointers to new space.
+ // A promoted object might already have been partially visited
+ // during dirty regions iteration. Thus we search specifically
+ // for pointers into the from semispace instead of looking for
+ // pointers into new space.
ASSERT(!target->IsMap());
- IterateAndMarkPointersToNewSpace(target->address(),
- target->address() + size,
- &ScavengePointer);
+ IterateAndMarkPointersToFromSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
@@ -1077,7 +1072,7 @@
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-void Heap::RecordCopiedObject(HeapObject* obj) {
+static void RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
@@ -1086,22 +1081,24 @@
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
- if (new_space_.Contains(obj)) {
- new_space_.RecordAllocation(obj);
+ if (Heap::new_space()->Contains(obj)) {
+ Heap::new_space()->RecordAllocation(obj);
} else {
- new_space_.RecordPromotion(obj);
+ Heap::new_space()->RecordPromotion(obj);
}
}
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
-HeapObject* Heap::MigrateObject(HeapObject* source,
- HeapObject* target,
- int size) {
+// Helper function used by CopyObject to copy a source object to an
+// allocated target object and update the forwarding pointer in the source
+// object. Returns the target object.
+inline static HeapObject* MigrateObject(HeapObject* source,
+ HeapObject* target,
+ int size) {
// Copy the content of source to target.
- CopyBlock(target->address(), source->address(), size);
+ Heap::CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@@ -1110,18 +1107,278 @@
// Update NewSpace stats if necessary.
RecordCopiedObject(target);
#endif
+ HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
return target;
}
-static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
- STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
- ASSERT(object->map() == map);
- InstanceType type = map->instance_type();
- if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
- ASSERT(object->IsString() && !object->IsSymbol());
- return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
+enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+enum SizeRestriction { SMALL, UNKNOWN_SIZE };
+
+
+template<ObjectContents object_contents, SizeRestriction size_restriction>
+static inline void EvacuateObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ ASSERT((size_restriction != SMALL) ||
+ (object_size <= Page::kMaxHeapObjectSize));
+ ASSERT(object->Size() == object_size);
+
+ if (Heap::ShouldBePromoted(object->address(), object_size)) {
+ Object* result;
+
+ if ((size_restriction != SMALL) &&
+ (object_size > Page::kMaxHeapObjectSize)) {
+ result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+ } else {
+ if (object_contents == DATA_OBJECT) {
+ result = Heap::old_data_space()->AllocateRaw(object_size);
+ } else {
+ result = Heap::old_pointer_space()->AllocateRaw(object_size);
+ }
+ }
+
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ *slot = MigrateObject(object, target, object_size);
+
+ if (object_contents == POINTER_OBJECT) {
+ promotion_queue.insert(target, object_size);
+ }
+
+ Heap::tracer()->increment_promoted_objects_size(object_size);
+ return;
+ }
+ }
+ Object* result = Heap::new_space()->AllocateRaw(object_size);
+ ASSERT(!result->IsFailure());
+ *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ return;
+}
+
+
+template<int object_size_in_words, ObjectContents object_contents>
+static inline void EvacuateObjectOfFixedSize(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ const int object_size = object_size_in_words << kPointerSizeLog2;
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+}
+
+
+template<ObjectContents object_contents>
+static inline void EvacuateObjectOfFixedSize(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = map->instance_size();
+ EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateFixedArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = FixedArray::cast(object)->FixedArraySize();
+ EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateByteArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = ByteArray::cast(object)->ByteArraySize();
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static Scavenger GetScavengerForSize(int object_size,
+ ObjectContents object_contents) {
+ ASSERT(IsAligned(object_size, kPointerSize));
+ ASSERT(object_size < Page::kMaxHeapObjectSize);
+
+ switch (object_size >> kPointerSizeLog2) {
+#define CASE(n) \
+ case n: \
+ if (object_contents == DATA_OBJECT) { \
+ return static_cast<Scavenger>( \
+ &EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \
+ } else { \
+ return static_cast<Scavenger>( \
+ &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
+ }
+
+ CASE(1);
+ CASE(2);
+ CASE(3);
+ CASE(4);
+ CASE(5);
+ CASE(6);
+ CASE(7);
+ CASE(8);
+ CASE(9);
+ CASE(10);
+ CASE(11);
+ CASE(12);
+ CASE(13);
+ CASE(14);
+ CASE(15);
+ CASE(16);
+ default:
+ if (object_contents == DATA_OBJECT) {
+ return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
+ } else {
+ return static_cast<Scavenger>(
+ &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
+ }
+
+#undef CASE
+ }
+}
+
+
+static inline void EvacuateSeqAsciiString(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline void EvacuateSeqTwoByteString(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqTwoByteString::cast(object)->
+ SeqTwoByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+}
+
+
+static inline bool IsShortcutCandidate(int type) {
+ return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+}
+
+
+static inline void EvacuateShortcutCandidate(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ ASSERT(IsShortcutCandidate(map->instance_type()));
+
+ if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
+ HeapObject* first =
+ HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+ *slot = first;
+
+ if (!Heap::InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
+
+ MapWord first_word = first->map_word();
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+ return;
+ }
+
+ first->map()->Scavenge(slot, first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
+ return;
+ }
+
+ int object_size = ConsString::kSize;
+ EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
+}
+
+
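
EvacuateShortcutCandidate implements the flattened-cons bypass: a ConsString whose second part is the empty string is semantically just its first part, so the scavenger forwards the slot straight to that first part instead of copying the cons cell. The idea in miniature, with simplified stand-in types:

#include <cstdio>
#include <string>

// Stand-in for a cons string: a pair (first, second), where an empty
// 'second' means the string has been flattened into 'first'.
struct ConsSketch {
  const std::string* first;
  const std::string* second;
};

// Returns the object a reference should be redirected to, or NULL if
// the cons cell must be evacuated normally.
const std::string* Shortcut(const ConsSketch& cons) {
  if (cons.second->empty()) return cons.first;  // bypass the cons cell
  return NULL;
}

int main() {
  std::string left = "abc";
  std::string empty = "";
  ConsSketch cons = {&left, &empty};
  std::printf("%s\n", Shortcut(cons)->c_str());  // prints "abc"
}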
+Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ switch (instance_type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
+ return &EvacuateSeqAsciiString;
+ } else {
+ return &EvacuateSeqTwoByteString;
+ }
+
+ case kConsStringTag:
+ if (IsShortcutCandidate(instance_type)) {
+ return &EvacuateShortcutCandidate;
+ } else {
+ ASSERT(instance_size == ConsString::kSize);
+ return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
+ }
+
+ case kExternalStringTag:
+ ASSERT(instance_size == ExternalString::kSize);
+ return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
+ }
+ UNREACHABLE();
+ }
+
+ switch (instance_type) {
+ case BYTE_ARRAY_TYPE:
+ return reinterpret_cast<Scavenger>(&EvacuateByteArray);
+
+ case FIXED_ARRAY_TYPE:
+ return reinterpret_cast<Scavenger>(&EvacuateFixedArray);
+
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ return GetScavengerForSize(instance_size, POINTER_OBJECT);
+
+ case ODDBALL_TYPE:
+ return NULL;
+
+ case PROXY_TYPE:
+ return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
+
+ case MAP_TYPE:
+ return NULL;
+
+ case CODE_TYPE:
+ return NULL;
+
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ return NULL;
+
+ case HEAP_NUMBER_TYPE:
+ case FILLER_TYPE:
+ case PIXEL_ARRAY_TYPE:
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ case EXTERNAL_INT_ARRAY_TYPE:
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return GetScavengerForSize(instance_size, DATA_OBJECT);
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
+ POINTER_OBJECT);
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ return GetScavengerForSize(instance_size, POINTER_OBJECT);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
}
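
The net effect of this block is that every Map caches a Scavenger function pointer chosen once from its instance type and size, so the scavenge loop does one indirect call per object instead of re-branching on type each time. A simplified sketch of the dispatch scheme (illustrative names, not V8's signatures):

#include <cstdio>

typedef void (*Scavenger)(void** slot, void* object);

static void EvacuateData(void** slot, void* object) {
  (void) slot; (void) object;
  std::printf("copy data object; no pointer rescan needed\n");
}

static void EvacuatePointers(void** slot, void* object) {
  (void) slot; (void) object;
  std::printf("copy object and queue it for pointer scanning\n");
}

enum Contents { DATA, POINTERS };

// Chosen once per "map" and cached, like Map::set_scavenger above.
Scavenger GetScavengerSketch(Contents contents) {
  return contents == DATA ? &EvacuateData : &EvacuatePointers;
}

int main() {
  void* object = NULL;
  Scavenger scavenge = GetScavengerSketch(POINTERS);
  scavenge(&object, object);  // one indirect call per object
}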
@@ -1129,103 +1386,8 @@
ASSERT(InFromSpace(object));
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
-
- // Optimization: Bypass flattened ConsString objects.
- if (IsShortcutCandidate(object, first_word.ToMap())) {
- object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
- *p = object;
- // After patching *p we have to repeat the checks that object is in the
- // active semispace of the young generation and not already copied.
- if (!InNewSpace(object)) return;
- first_word = object->map_word();
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
- }
- }
-
- int object_size = object->SizeFromMap(first_word.ToMap());
- // We rely on live objects in new space to be at least two pointers,
- // so we can store the from-space address and map pointer of promoted
- // objects in the to space.
- ASSERT(object_size >= 2 * kPointerSize);
-
- // If the object should be promoted, we try to copy it to old space.
- if (ShouldBePromoted(object->address(), object_size)) {
- Object* result;
- if (object_size > MaxObjectSizeInPagedSpace()) {
- result = lo_space_->AllocateRawFixedArray(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
-
- if (object->IsFixedArray()) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
-
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
-
- *p = target;
- } else {
- // In large object space only fixed arrays might possibly contain
- // intergenerational references.
- // All other objects can be copied immediately and not revisited.
- *p = MigrateObject(object, target, object_size);
- }
-
- tracer()->increment_promoted_objects_size(object_size);
- return;
- }
- } else {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space_ ||
- target_space == Heap::old_data_space_);
- result = target_space->AllocateRaw(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
- if (target_space == Heap::old_pointer_space_) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
-
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
-
- *p = target;
- } else {
- // Objects promoted to the data space can be copied immediately
- // and not revisited---we will never sweep that space for
- // pointers and the copied objects do not contain pointers to
- // new space objects.
- *p = MigrateObject(object, target, object_size);
-#ifdef DEBUG
- VerifyNonPointerSpacePointersVisitor v;
- (*p)->Iterate(&v);
-#endif
- }
- tracer()->increment_promoted_objects_size(object_size);
- return;
- }
- }
- }
- // The object should remain in new space or the old space allocation failed.
- Object* result = new_space_.AllocateRaw(object_size);
- // Failed allocation at this point is utterly unexpected.
- ASSERT(!result->IsFailure());
- *p = MigrateObject(object, HeapObject::cast(result), object_size);
+ Map* map = first_word.ToMap();
+ map->Scavenge(p, object);
}
@@ -1243,6 +1405,8 @@
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ reinterpret_cast<Map*>(result)->
+ set_scavenger(GetScavenger(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -1259,6 +1423,7 @@
Map* map = reinterpret_cast<Map*>(result);
map->set_map(meta_map());
map->set_instance_type(instance_type);
+ map->set_scavenger(GetScavenger(instance_type, instance_size));
map->set_prototype(null_value());
map->set_constructor(null_value());
map->set_instance_size(instance_size);
@@ -1891,6 +2056,7 @@
share->set_name(name);
Code* illegal = Builtins::builtin(Builtins::Illegal);
share->set_code(illegal);
+ share->set_scope_info(SerializedScopeInfo::Empty());
Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
@@ -2315,14 +2481,8 @@
ThreadManager::IterateArchivedThreads(&threadvisitor);
if (threadvisitor.FoundCode()) return;
- // Check that there are heap allocated locals in the scopeinfo. If
- // there is, we are potentially using eval and need the scopeinfo
- // for variable resolution.
- if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code()))
- return;
-
- HandleScope scope;
// Compute the lazy compilable version of the code.
+ HandleScope scope;
function_info->set_code(*ComputeLazyCompile(function_info->length()));
}
@@ -2348,7 +2508,6 @@
Object* Heap::CreateCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference) {
// Allocate ByteArray before the Code object, so that we do not risk
@@ -2358,9 +2517,7 @@
// Compute size
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int sinfo_size = 0;
- if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
- int obj_size = Code::SizeFor(body_size, sinfo_size);
+ int obj_size = Code::SizeFor(body_size);
ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
Object* result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
@@ -2377,7 +2534,6 @@
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
- code->set_sinfo_size(sinfo_size);
code->set_flags(flags);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -2390,7 +2546,6 @@
// objects. These pointers can include references to the code object itself,
// through the self_reference parameter.
code->CopyFrom(desc);
- if (sinfo != NULL) sinfo->Serialize(code); // write scope info
#ifdef DEBUG
code->Verify();
@@ -2431,9 +2586,7 @@
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
- int sinfo_size = code->sinfo_size();
-
- int new_obj_size = Code::SizeFor(new_body_size, sinfo_size);
+ int new_obj_size = Code::SizeFor(new_body_size);
Address old_addr = code->address();
@@ -2460,8 +2613,6 @@
// Copy patched rinfo.
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
- // Copy sinfo.
- memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size());
// Relocate the copy.
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@@ -3657,7 +3808,7 @@
Max(start, prev_map + Map::kPointerFieldsBeginOffset);
Address pointer_fields_end =
- Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
+ Min(prev_map + Map::kPointerFieldsEndOffset, end);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
@@ -3675,10 +3826,11 @@
if (map_aligned_end != end) {
ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
- Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
+ Address pointer_fields_start =
+ map_aligned_end + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end =
- Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
+ Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
@@ -3691,9 +3843,9 @@
}
-void Heap::IterateAndMarkPointersToNewSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
+void Heap::IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
@@ -3701,7 +3853,7 @@
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (Heap::InNewSpace(*slot)) {
+ if (Heap::InFromSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
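
The rename from ...ToNewSpace to ...ToFromSpace matches the new promotion logic: a promoted object may already have been partially visited through dirty-region iteration, so the rescan must only react to pointers that still point into the from semispace. The contract, sketched with made-up semispace bounds:

#include <cstdint>
#include <cstdio>

static const uintptr_t kFromLo = 0x1000;  // assumed semispace bounds
static const uintptr_t kFromHi = 0x2000;

static bool InFromSpace(void* p) {
  uintptr_t a = reinterpret_cast<uintptr_t>(p);
  return a >= kFromLo && a < kFromHi;
}

typedef void (*SlotCallback)(void** slot);

// Walk the slots in [start, end) and invoke the callback only for
// pointers into from-space, mirroring IterateAndMarkPointersToFromSpace.
void IteratePointersToFromSpace(void** start, void** end,
                                SlotCallback callback) {
  for (void** slot = start; slot < end; ++slot) {
    if (InFromSpace(*slot)) callback(slot);  // e.g. ScavengePointer
  }
}

static void PrintSlot(void** slot) { std::printf("fixup %p\n", *slot); }

int main() {
  void* slots[2];
  slots[0] = reinterpret_cast<void*>(0x1800);  // in from-space
  slots[1] = reinterpret_cast<void*>(0x4000);  // elsewhere; skipped
  IteratePointersToFromSpace(slots, slots + 2, &PrintSlot);
}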
diff --git a/src/heap.h b/src/heap.h
index df3ba0e..18991b4 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -36,8 +36,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class ZoneScopeInfo;
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
@@ -626,7 +624,6 @@
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
static Object* CreateCode(const CodeDesc& desc,
- ZoneScopeInfo* sinfo,
Code::Flags flags,
Handle<Object> self_reference);
@@ -774,11 +771,12 @@
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
- // Iterate pointers to new space found in memory interval from start to end.
+ // Iterate pointers into the from semispace of new space found in the
+ // memory interval from start to end.
// Update dirty marks for page containing start address.
- static void IterateAndMarkPointersToNewSpace(Address start,
- Address end,
- ObjectSlotCallback callback);
+ static void IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
@@ -985,6 +983,8 @@
static void RecordStats(HeapStats* stats);
+ static Scavenger GetScavenger(int instance_type, int instance_size);
+
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -1232,17 +1232,7 @@
set_instanceof_cache_function(the_hole_value());
}
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- static inline HeapObject* MigrateObject(HeapObject* source,
- HeapObject* target,
- int size);
-
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- // Record the copy of an object in the NewSpace's statistics.
- static void RecordCopiedObject(HeapObject* obj);
-
// Record statistics before and after garbage collection.
static void ReportStatisticsBeforeGC();
static void ReportStatisticsAfterGC();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index ce2099d..e011237 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -121,7 +121,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!code->IsCode()) return;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 0f72074..20fbfa3 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -34,12 +34,9 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
-#include "jsregexp.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
#include "register-allocator-inl.h"
-#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
@@ -143,7 +140,7 @@
// -------------------------------------------------------------------------
-// CodeGenerator implementation
+// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
@@ -374,12 +371,11 @@
}
// Adjust for function-level loop nesting.
- ASSERT_EQ(info->loop_nesting(), loop_nesting_);
+ ASSERT_EQ(loop_nesting_, info->loop_nesting());
loop_nesting_ = 0;
// Code generation state must be reset.
ASSERT(state_ == NULL);
- ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@@ -646,7 +642,6 @@
} else {
JumpTarget true_target;
JumpTarget false_target;
-
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(expr, &dest, false);
@@ -784,9 +779,9 @@
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has already
- // been written to. This can happen if the a function has a local
- // variable named 'arguments'.
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop();
if (probe.is_constant()) {
@@ -1434,8 +1429,8 @@
} else {
unsigned_left >>= shift_amount;
}
- ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
- answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
break;
}
default:
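
The two-line fix above replaces an implicit unsigned-to-signed conversion with an explicit static_cast<int32_t>: the folded shift result lives in an unsigned 32-bit variable, while Smi::FromInt wants a signed int. A tiny worked example of the conversion being made explicit:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t unsigned_left = 0x80000000u;  // too large for a signed smi
  unsigned_left >>= 2;                   // 0x20000000 after the shift
  int32_t as_signed = static_cast<int32_t>(unsigned_left);
  std::printf("%d\n", as_signed);        // prints 536870912
}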
@@ -1919,12 +1914,12 @@
void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub igostub(
+ GenericBinaryOpStub stub(
op_,
overwrite_mode_,
NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(TypeInfo::Smi(), type_info_));
- igostub.GenerateCall(masm_, value_, src_);
+ stub.GenerateCall(masm_, value_, src_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -2424,6 +2419,7 @@
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
default: {
Result constant_operand(value);
@@ -2487,8 +2483,7 @@
}
ASSERT(cc == less || cc == equal || cc == greater_equal);
- // If either side is a constant of some sort, we can probably optimize the
- // comparison.
+ // If either side is a constant smi, optimize the comparison.
bool left_side_constant_smi = false;
bool left_side_constant_null = false;
bool left_side_constant_1_char_string = false;
@@ -2513,114 +2508,11 @@
}
if (left_side_constant_smi || right_side_constant_smi) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side.handle())->value();
- int right_value = Smi::cast(*right_side.handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
-
- if (left_side.is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg());
- }
- } else {
- JumpTarget is_smi;
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(zero, taken);
-
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- if (!is_loop_condition &&
- CpuFeatures::IsSupported(SSE2) &&
- right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_number.Branch(not_equal, &left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
-
- // Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- }
-
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side.handle())) {
- right_side.ToRegister();
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- } else {
- __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
- }
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
-
+ bool is_loop_condition = (node->AsExpression() != NULL) &&
+ node->AsExpression()->is_loop_condition();
+ ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
+ left_side_constant_smi, right_side_constant_smi,
+ is_loop_condition);
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@@ -2780,13 +2672,14 @@
}
} else {
// Neither side is a constant Smi, constant 1-char string or constant null.
- // If either side is a non-smi constant, or known to be a heap number skip
- // the smi check.
+ // If either side is a non-smi constant, or known to be a heap number,
+ // skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi()) ||
left_side.type_info().IsDouble() ||
right_side.type_info().IsDouble();
+
NaNInformation nan_info =
(CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
kBothCouldBeNaN :
@@ -2811,14 +2704,15 @@
right_side.ToRegister();
if (known_non_smi) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
+ // Inlined equality check:
+ // If at least one of the objects is not NaN, then if the objects
+ // are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmp(left_side.reg(), Operand(right_side.reg()));
dest->true_target()->Branch(equal);
}
- // Inline number comparison.
+ // Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@@ -2856,7 +2750,7 @@
dest->true_target()->Branch(equal);
}
- // Inline number comparison.
+ // Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
@@ -2882,6 +2776,139 @@
}
+void CodeGenerator::ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* dest,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side->handle())->value();
+ int right_value = Smi::cast(*right_side->handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result* temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may re-introduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side->ToRegister();
+ Register left_reg = left_side->reg();
+ Handle<Object> right_val = right_side->handle();
+
+ if (left_side->is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_reg);
+ }
+ // Test smi equality and comparison by signed int comparison.
+ if (IsUnsafeSmi(right_side->handle())) {
+ right_side->ToRegister();
+ __ cmp(left_reg, Operand(right_side->reg()));
+ } else {
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ }
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ } else {
+ // Only the case where the left side could possibly be a non-smi is left.
+ JumpTarget is_smi;
+ if (cc == equal) {
+ // We can do the equality comparison before the smi check.
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ dest->true_target()->Branch(equal);
+ __ test(left_reg, Immediate(kSmiTagMask));
+ dest->false_target()->Branch(zero);
+ } else {
+ // Do the smi check, then the comparison.
+ JumpTarget is_not_smi;
+ __ test(left_reg, Immediate(kSmiTagMask));
+ is_smi.Branch(zero, left_side, right_side);
+ }
+
+ // Jump or fall through to here if we are comparing a non-smi to a
+ // constant smi. If the non-smi is a heap number and this is not
+ // a loop condition, inline the floating point code.
+ if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ CpuFeatures::Scope use_sse2(SSE2);
+ JumpTarget not_number;
+ __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ not_number.Branch(not_equal, left_side);
+ __ movdbl(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ mov(temp.reg(), Immediate(value));
+ __ cvtsi2sd(xmm0, Operand(temp.reg()));
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, left_side);
+ left_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+ not_number.Bind(left_side);
+ }
+
+ // Setup and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, left_side, right_side);
+ result.ToRegister();
+ __ test(result.reg(), Operand(result.reg()));
+ result.Unuse();
+ if (cc == equal) {
+ dest->Split(cc);
+ } else {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ // It is important for performance for this case to be at the end.
+ is_smi.Bind(left_side, right_side);
+ if (IsUnsafeSmi(right_side->handle())) {
+ right_side->ToRegister();
+ __ cmp(left_reg, Operand(right_side->reg()));
+ } else {
+ __ cmp(Operand(left_reg), Immediate(right_side->handle()));
+ }
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ }
+ }
+ }
+}
+
+
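The operand swap in ConstantSmiComparison above relies on condition reversal. A self-contained sketch of the mapping, with a hypothetical enum standing in for the assembler's condition codes; note how reversing less or greater_equal re-introduces greater and less_equal, exactly as the comment above observes:

```cpp
// Swapping comparison operands requires reversing the condition:
// a < b holds exactly when b > a.
enum Condition { kLess, kLessEqual, kEqual, kGreaterEqual, kGreater };

Condition ReverseCondition(Condition cc) {
  switch (cc) {
    case kLess:         return kGreater;
    case kLessEqual:    return kGreaterEqual;
    case kEqual:        return kEqual;
    case kGreaterEqual: return kLessEqual;
    case kGreater:      return kLess;
  }
  return cc;
}
```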
// Check that the comparison operand is a number. Jump to not_numbers jump
// target passing the left and right result if the operand is not a number.
static void CheckComparisonOperand(MacroAssembler* masm_,
@@ -2941,19 +2968,19 @@
// target passing the left and right result if the operand is not a number.
static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
Result* operand,
- XMMRegister reg,
+ XMMRegister xmm_reg,
Result* left_side,
Result* right_side,
JumpTarget* not_numbers) {
Label done;
if (operand->type_info().IsDouble()) {
// Operand is known to be a heap number, just load it.
- __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
} else if (operand->type_info().IsSmi()) {
// Operand is known to be a smi. Convert it to double and keep the original
// smi.
__ SmiUntag(operand->reg());
- __ cvtsi2sd(reg, Operand(operand->reg()));
+ __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
} else {
// Operand type not known, check for smi or heap number.
@@ -2965,13 +2992,13 @@
Immediate(Factory::heap_number_map()));
not_numbers->Branch(not_equal, left_side, right_side, taken);
}
- __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&smi);
// Convert smi to float and keep the original smi.
__ SmiUntag(operand->reg());
- __ cvtsi2sd(reg, Operand(operand->reg()));
+ __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
__ SmiTag(operand->reg());
__ jmp(&done);
}
@@ -3568,8 +3595,10 @@
return_value->ToRegister(eax);
// Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+#endif
// Leave the frame and return popping the arguments and the
// receiver.
@@ -3690,7 +3719,6 @@
}
}
-
// The last instruction emitted was a jump, either to the default
// clause or the break target, or else to a case body from the loop
// that compiles the tests.
@@ -3778,8 +3806,8 @@
// Compile the test.
switch (info) {
case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back to
- // the top and bind the break target at the exit.
+ // If control flow can fall off the end of the body, jump back
+ // to the top and bind the break target at the exit.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@@ -3815,6 +3843,8 @@
}
DecrementLoopNesting();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
}
@@ -3899,8 +3929,8 @@
break;
case DONT_KNOW:
if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom, then
- // it is the continue target.
+ // If we have chosen to recompile the test at the bottom,
+ // then it is the continue target.
if (node->continue_target()->is_linked()) {
node->continue_target()->Bind();
}
@@ -4016,6 +4046,7 @@
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
+
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
@@ -4125,8 +4156,8 @@
break;
}
- // The break target may be already bound (by the condition), or
- // there may not be a valid frame. Bind it only if needed.
+ // The break target may be already bound (by the condition), or there
+ // may not be a valid frame. Bind it only if needed.
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
@@ -6406,6 +6437,27 @@
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+ // typeof(arg) == 'function').
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // Check that this is an object.
+ frame_->Spill(value.reg());
+ __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
+ value.Unuse();
+ destination()->Split(above_equal);
+}
+
+
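The inline sequence above boils down to a smi check followed by an instance-type range check: every JS object type, including functions and undetectable objects, sits at or above FIRST_JS_OBJECT_TYPE. A hedged C++ model of the predicate; the constant's value is illustrative:

```cpp
#include <cstdint>

// All JS object types (plain objects, RegExps, functions, undetectable
// objects) have instance types at or above this threshold; the value
// here is made up for illustration.
const uint8_t kFirstJSObjectType = 0x80;

bool IsSpecObject(bool is_smi, uint8_t instance_type) {
  if (is_smi) return false;                    // the smi tag check fails first
  return instance_type >= kFirstJSObjectType;  // the 'above_equal' split
}
```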
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -6678,11 +6730,8 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ push(Immediate(Smi::FromInt(0)));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(edi, eax);
__ bind(&heapnumber_allocated);
@@ -11638,6 +11687,8 @@
void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
Label check_unequal_objects, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -12531,8 +12582,10 @@
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
// condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
@@ -12542,6 +12595,8 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a432c13..24f9957 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -560,6 +560,17 @@
Condition cc,
bool strict,
ControlDestination* destination);
+
+ // If at least one of the sides is a constant smi, generate optimized code.
+ void ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* destination,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition);
+
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@@ -621,6 +632,7 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 2ca1105..b2ff1fd 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1985,6 +1985,26 @@
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(equal, if_false);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(above_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2242,11 +2262,8 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ push(Immediate(Smi::FromInt(0)));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(edi, eax);
__ bind(&heapnumber_allocated);
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index b0de827..a7930fb 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -872,7 +872,6 @@
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 26361d1..e81fbc7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -111,7 +111,7 @@
Register receiver,
String* name,
Register r0,
- Register extra) {
+ Register r1) {
ASSERT(name->IsSymbol());
__ IncrementCounter(&Counters::negative_lookups, 1);
__ IncrementCounter(&Counters::negative_lookups_miss, 1);
@@ -121,11 +121,13 @@
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
// Bail out if the receiver has a named interceptor or requires access checks.
- __ test(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ kInterceptorOrAccessCheckNeededMask);
__ j(not_zero, miss_label, not_taken);
+ // Check that receiver is a JSObject.
__ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
__ j(below, miss_label, not_taken);
@@ -158,10 +160,7 @@
for (int i = 0; i < kProbes; i++) {
// r0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
- if (extra.is(no_reg)) {
- __ push(receiver);
- }
- Register index = extra.is(no_reg) ? receiver : extra;
+ Register index = r1;
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
@@ -173,16 +172,12 @@
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = extra.is(no_reg) ? properties : extra;
+ Register entity_name = r1;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, Factory::undefined_value());
- if (extra.is(no_reg)) {
- // 'receiver' shares a register with 'entity_name'.
- __ pop(receiver);
- }
if (i != kProbes - 1) {
__ j(equal, &done, taken);
@@ -190,10 +185,11 @@
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss_label, not_taken);
- if (extra.is(no_reg)) {
- // Restore the properties if their register was occupied by the name.
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
- }
+ // Check if the entry name is not a symbol.
+ __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ kIsSymbolMask);
+ __ j(zero, miss_label, not_taken);
} else {
// Give up probing if still not found the undefined value.
__ j(not_equal, miss_label, not_taken);
@@ -525,6 +521,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -541,6 +538,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -552,6 +550,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -564,6 +563,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -603,7 +603,7 @@
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -619,7 +619,7 @@
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -655,12 +655,13 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
__ EnterInternalFrame();
@@ -862,14 +863,15 @@
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
+ Label* miss) {
// Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
- ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
// Keep track of the current object in register reg.
Register reg = object_reg;
JSObject* current = object;
@@ -909,31 +911,31 @@
miss,
reg,
name,
- scratch,
- extra);
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ scratch1,
+ scratch2);
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else if (Heap::InNewSpace(prototype)) {
// Get the map of the current object.
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss, not_taken);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
// Restore scratch register to be the map of the object.
// We load the prototype from the map in the scratch register.
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
// The prototype is in new space; we cannot store a reference
// to it in the code. Load it from the map.
reg = holder_reg; // from now the object is in holder_reg
- __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// Check the map of the current object.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -944,7 +946,7 @@
// after the map check so that we know that the object is
// actually a global object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// The prototype is in old space; load it directly.
reg = holder_reg; // from now the object is in holder_reg
@@ -971,7 +973,7 @@
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
};
// If we've skipped any global objects, it's not enough to verify
@@ -981,7 +983,7 @@
object,
holder,
name,
- scratch,
+ scratch1,
miss);
if (result->IsFailure()) set_failure(Failure::cast(result));
@@ -995,6 +997,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -1005,7 +1008,7 @@
// Check the prototype chain.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1019,6 +1022,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -1030,7 +1034,7 @@
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
Handle<AccessorInfo> callback_handle(callback);
@@ -1094,6 +1098,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -1104,7 +1109,7 @@
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ mov(eax, Handle<Object>(value));
@@ -1119,6 +1124,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -1147,7 +1153,8 @@
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -1195,6 +1202,7 @@
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -1235,7 +1243,7 @@
// Check that the maps haven't changed.
Register holder_reg =
CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -1310,8 +1318,8 @@
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
- name, &miss, edi);
+ Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
+ name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -1373,7 +1381,7 @@
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss, edi);
+ eax, edi, name, &miss);
if (argc == 0) {
// Noop, return the length.
@@ -1519,7 +1527,7 @@
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss, edi);
+ eax, edi, name, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1594,7 +1602,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
@@ -1659,7 +1667,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
@@ -1764,7 +1772,7 @@
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, depth, &miss, edi);
+ ebx, eax, edi, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1787,7 +1795,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
@@ -1807,7 +1815,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
}
@@ -1828,7 +1836,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss, edi);
+ ebx, edx, edi, name, &miss);
}
break;
}
@@ -1888,6 +1896,7 @@
edx,
ebx,
edi,
+ eax,
&miss);
// Restore receiver.
@@ -1950,7 +1959,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
+ CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -2226,7 +2235,7 @@
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
- CheckPrototypes(object, eax, last, ebx, edx, name, &miss);
+ CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -2263,7 +2272,7 @@
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
+ GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2284,7 +2293,7 @@
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+ bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -2307,7 +2316,7 @@
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
+ GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2338,6 +2347,7 @@
ecx,
edx,
ebx,
+ edi,
name,
&miss);
@@ -2370,7 +2380,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
+ CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -2415,7 +2425,7 @@
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss);
+ GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -2444,7 +2454,7 @@
__ j(not_equal, &miss, not_taken);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx,
+ bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -2474,7 +2484,7 @@
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadConstant(receiver, holder, edx, ebx, ecx,
+ GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -2510,6 +2520,7 @@
eax,
ecx,
ebx,
+ edi,
name,
&miss);
__ bind(&miss);
diff --git a/src/ic.cc b/src/ic.cc
index 12332f9..9bb18f7 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -525,17 +525,12 @@
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-#ifndef V8_TARGET_ARCH_IA32
- // Normal objects only implemented for IA32 by now.
- if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
-#else
if (lookup->holder() != *object &&
HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
// Suppress optimization for prototype chains with slow properties objects
// in the middle.
return;
}
-#endif
// Compute the number of arguments.
int argc = target()->arguments_count();
diff --git a/src/json.js b/src/json.js
index cdb10be..e7ec610 100644
--- a/src/json.js
+++ b/src/json.js
@@ -29,7 +29,7 @@
function ParseJSONUnfiltered(text) {
var s = $String(text);
- var f = %CompileString(text, true);
+ var f = %CompileString(s, true);
return f();
}
diff --git a/src/macros.py b/src/macros.py
index b4be15b..643a285 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -115,7 +115,8 @@
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
# This is the same as being either a function or an object in V8 terminology.
-macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
+# In addition, an undetectable object is also included by this.
+macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 95afb4a..d9b0222 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "execution.h"
+#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
@@ -425,8 +426,10 @@
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
MarkingVisitor visitor; // Has no state or contents.
- visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
- HeapObject::RawField(map, Map::kSize));
+ visitor.VisitPointers(HeapObject::RawField(map,
+ Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map,
+ Map::kPointerFieldsEndOffset));
}
@@ -2216,6 +2219,7 @@
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2262,6 +2266,7 @@
// Notify the logger that compiled code has moved.
PROFILE(CodeMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2306,6 +2311,7 @@
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
diff --git a/src/messages.cc b/src/messages.cc
index 7cb1d20..ec91cc8 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -66,7 +66,8 @@
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace) {
+ Handle<String> stack_trace,
+ Handle<JSArray> stack_frames) {
// Build error message object
v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
Handle<Object> type_str = Factory::LookupAsciiSymbol(type);
@@ -90,13 +91,17 @@
Handle<Object> stack_trace_val = stack_trace.is_null()
? Factory::undefined_value()
: Handle<Object>::cast(stack_trace);
- const int argc = 6;
+ Handle<Object> stack_frames_val = stack_frames.is_null()
+ ? Factory::undefined_value()
+ : Handle<Object>::cast(stack_frames);
+ const int argc = 7;
Object** argv[argc] = { type_str.location(),
array.location(),
start_handle.location(),
end_handle.location(),
script.location(),
- stack_trace_val.location() };
+ stack_trace_val.location(),
+ stack_frames_val.location() };
// Setup a catch handler to catch exceptions in creating the message. This
// handler is non-verbose to avoid calling MakeMessage recursively in case of
diff --git a/src/messages.h b/src/messages.h
index 80ce8eb..440bde8 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -96,7 +96,8 @@
static Handle<Object> MakeMessageObject(const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace);
+ Handle<String> stack_trace,
+ Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(MessageLocation* loc, Handle<Object> message);
diff --git a/src/messages.js b/src/messages.js
index 99ba454..b0f8aa1 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -181,7 +181,6 @@
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",
- apply_overflow: "Function.prototype.apply cannot support %0 arguments",
// SyntaxError
unable_to_parse: "Parse error",
duplicate_regexp_flag: "Duplicate RegExp flag %0",
@@ -601,18 +600,22 @@
}
-function ErrorMessage(type, args, startPos, endPos, script, stackTrace) {
+function ErrorMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames) {
this.startPos = startPos;
this.endPos = endPos;
this.type = type;
this.args = args;
this.script = script;
this.stackTrace = stackTrace;
+ this.stackFrames = stackFrames;
}
-function MakeMessage(type, args, startPos, endPos, script, stackTrace) {
- return new ErrorMessage(type, args, startPos, endPos, script, stackTrace);
+function MakeMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames) {
+ return new ErrorMessage(type, args, startPos, endPos, script, stackTrace,
+ stackFrames);
}
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index f8b88d7..79801f0 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -907,6 +907,11 @@
}
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0f0a746..3ad94e8 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -355,6 +355,7 @@
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateStringAdd(ZoneList<Expression*>* args);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0b5ff99..d340e4b 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -789,6 +789,7 @@
CHECK(IsSharedFunctionInfo());
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kScriptOffset);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0e45550..101096d 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2060,6 +2060,21 @@
ptr[index] = value;
}
+inline Scavenger Map::scavenger() {
+ Scavenger callback = reinterpret_cast<Scavenger>(
+ READ_INTPTR_FIELD(this, kScavengerCallbackOffset));
+
+ ASSERT(callback == Heap::GetScavenger(instance_type(),
+ instance_size()));
+
+ return callback;
+}
+
+inline void Map::set_scavenger(Scavenger callback) {
+ WRITE_INTPTR_FIELD(this,
+ kScavengerCallbackOffset,
+ reinterpret_cast<intptr_t>(callback));
+}
int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
@@ -2632,6 +2647,19 @@
}
+SerializedScopeInfo* SharedFunctionInfo::scope_info() {
+ return reinterpret_cast<SerializedScopeInfo*>(
+ READ_FIELD(this, kScopeInfoOffset));
+}
+
+
+void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+ WriteBarrierMode mode) {
+ WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
+ CONDITIONAL_WRITE_BARRIER(this, kScopeInfoOffset, mode);
+}
+
+
bool SharedFunctionInfo::is_compiled() {
// TODO(1242782): Create a code kind for uncompiled code.
return code()->kind() != Code::STUB;
@@ -2808,7 +2836,6 @@
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
byte* Code::instruction_start() {
@@ -2852,11 +2879,6 @@
}
-byte* Code::sinfo_start() {
- return FIELD_ADDR(this, kHeaderSize + body_size());
-}
-
-
ACCESSORS(JSArray, length, Object, kLengthOffset)
diff --git a/src/objects.cc b/src/objects.cc
index e79a550..8f668fb 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2190,6 +2190,8 @@
int new_instance_size = map()->instance_size() - instance_size_delta;
new_map->set_inobject_properties(0);
new_map->set_instance_size(new_instance_size);
+ new_map->set_scavenger(Heap::GetScavenger(new_map->instance_type(),
+ new_map->instance_size()));
Heap::CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
}
@@ -5033,7 +5035,7 @@
void Map::MapIterateBody(ObjectVisitor* v) {
// Assumes all Object* members are contiguously allocated!
- IteratePointers(v, kPrototypeOffset, kCodeCacheOffset + kPointerSize);
+ IteratePointers(v, kPointerFieldsBeginOffset, kPointerFieldsEndOffset);
}
@@ -5325,8 +5327,6 @@
for (; !it.done(); it.next()) {
it.rinfo()->Visit(v);
}
-
- ScopeInfo<>::IterateScopeInfo(this, v);
}
@@ -7338,6 +7338,46 @@
}
+// Find entry for key, otherwise return kNotFound.
+int StringDictionary::FindEntry(String* key) {
+ if (!key->IsSymbol()) {
+ return HashTable<StringDictionaryShape, String*>::FindEntry(key);
+ }
+
+ // Optimized for symbol key. Knowledge of the key type allows:
+ // 1. Move the check whether the key is a symbol out of the loop.
+ // 2. Avoid comparing hash codes in symbol-to-symbol comparison.
+ // 3. Detect the case when a dictionary key is not a symbol but the lookup
+ //    key is. In case of a positive result the dictionary key may be
+ //    replaced by the symbol with minimal performance penalty. It gives a
+ //    chance to perform further lookups in code stubs (and a significant
+ //    performance boost for a certain style of code).
+
+ // EnsureCapacity will guarantee the hash table is never full.
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(key->Hash(), capacity);
+ uint32_t count = 1;
+
+ while (true) {
+ int index = EntryToIndex(entry);
+ Object* element = get(index);
+ if (element->IsUndefined()) break; // Empty entry.
+ if (key == element) return entry;
+ if (!element->IsSymbol() &&
+ !element->IsNull() &&
+ String::cast(element)->Equals(key)) {
+ // Replace a non-symbol key by the equivalent symbol for faster further
+ // lookups.
+ set(index, key);
+ return entry;
+ }
+ ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+ entry = NextProbe(entry, count++, capacity);
+ }
+ return kNotFound;
+}
+
+
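The loop above walks the usual open-addressing probe sequence. A self-contained sketch of the probing scheme it assumes: a power-of-two capacity with offsets growing triangularly, so every slot is visited exactly once and the loop always terminates at either the key or an empty (undefined) slot:

```cpp
#include <cstdint>

// Probe offsets 0, 1, 3, 6, 10, ... (triangular numbers) modulo a
// power-of-two capacity visit each slot exactly once.
static uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);
}

static uint32_t NextProbe(uint32_t last, uint32_t count, uint32_t capacity) {
  return (last + count) & (capacity - 1);
}
```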
template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
diff --git a/src/objects.h b/src/objects.h
index 4a7dee6..2b64611 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2012,7 +2012,7 @@
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
- // Find entry for key otherwise return -1.
+ // Find entry for key, otherwise return kNotFound.
int FindEntry(Key key);
protected:
@@ -2294,6 +2294,10 @@
// For transforming properties of a JSObject.
Object* TransformPropertiesToFastFor(JSObject* obj,
int unused_property_fields);
+
+ // Find entry for key, otherwise return kNotFound. Optimized version of
+ // HashTable::FindEntry.
+ int FindEntry(String* key);
};
@@ -2744,10 +2748,6 @@
inline int relocation_size();
- // [sinfo_size]: Size of scope information.
- inline int sinfo_size();
- inline void set_sinfo_size(int value);
-
// [flags]: Various code flags.
inline Flags flags();
inline void set_flags(Flags flags);
@@ -2816,9 +2816,6 @@
// Returns true if pc is inside this object's instructions.
inline bool contains(byte* pc);
- // Returns the address of the scope information.
- inline byte* sinfo_start();
-
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
void Relocate(intptr_t delta);
@@ -2826,12 +2823,10 @@
// Migrate code described by desc.
void CopyFrom(const CodeDesc& desc);
- // Returns the object size for a given body and sinfo size (Used for
- // allocation).
- static int SizeFor(int body_size, int sinfo_size) {
+ // Returns the object size for a given body (used for allocation).
+ static int SizeFor(int body_size) {
ASSERT_SIZE_TAG_ALIGNED(body_size);
- ASSERT_SIZE_TAG_ALIGNED(sinfo_size);
- return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment);
+ return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}
// Calculate the size of the code object to report for log events. This takes
@@ -2851,7 +2846,7 @@
static inline Code* cast(Object* obj);
// Dispatched behavior.
- int CodeSize() { return SizeFor(body_size(), sinfo_size()); }
+ int CodeSize() { return SizeFor(body_size()); }
void CodeIterateBody(ObjectVisitor* v);
#ifdef DEBUG
void CodePrint();
@@ -2865,8 +2860,7 @@
// Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
- static const int kSInfoSizeOffset = kRelocationInfoOffset + kPointerSize;
- static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
+ static const int kFlagsOffset = kRelocationInfoOffset + kPointerSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -2899,6 +2893,7 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
+typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object);
// All heap objects have a Map that describes their structure.
// A Map contains information about:
@@ -3100,6 +3095,13 @@
void MapVerify();
#endif
+ inline Scavenger scavenger();
+ inline void set_scavenger(Scavenger callback);
+
+ inline void Scavenge(HeapObject** slot, HeapObject* obj) {
+ scavenger()(this, slot, obj);
+ }
+
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
@@ -3110,7 +3112,8 @@
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kPadStart = kCodeCacheOffset + kPointerSize;
+ static const int kScavengerCallbackOffset = kCodeCacheOffset + kPointerSize;
+ static const int kPadStart = kScavengerCallbackOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
@@ -3273,6 +3276,9 @@
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
@@ -3426,7 +3432,8 @@
// Pointer fields.
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kConstructStubOffset = kCodeOffset + kPointerSize;
+ static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
+ static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
static const int kFunctionDataOffset =
diff --git a/src/parser.cc b/src/parser.cc
index fb58cfa..dd5f9bd 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -36,6 +36,7 @@
#include "parser.h"
#include "platform.h"
#include "runtime.h"
+#include "scopeinfo.h"
#include "scopes.h"
#include "string-stream.h"
@@ -1969,7 +1970,8 @@
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
- Factory::NewSharedFunctionInfo(name, literals, code);
+ Factory::NewSharedFunctionInfo(name, literals, code,
+ Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
shared->set_construct_stub(*construct_stub);
// Copy the function data to the shared function info.
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index e3ae867..58ff154 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -83,6 +83,12 @@
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ *ptr = value;
+}
+
+
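A hedged illustration of what the new OpenBSD ReleaseStore provides: the empty asm with a "memory" clobber is a compiler-only fence, keeping preceding stores from being reordered past the flag store (the strongly ordered x86 hardware this port targets supplies the rest). The 'data' and 'ready' names are illustrative:

```cpp
#include <cstdint>

static int data;
static volatile intptr_t ready;

void Publish(int value) {
  data = value;
  __asm__ __volatile__("" : : : "memory");  // compiler barrier only
  ready = 1;  // release store: 'data' is written before 'ready' is set
}

int Consume() {
  while (ready == 0) { /* spin until published */ }
  return data;
}
```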
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // OpenBSD runs on anything.
}
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 57ff661..5315bfb 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -181,8 +181,6 @@
}
-namespace {
-
class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@@ -194,8 +192,6 @@
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
-} // namespace
-
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG,
@@ -240,8 +236,6 @@
}
-namespace {
-
struct NodesPair {
NodesPair(ProfileNode* src, ProfileNode* dst)
: src(src), dst(dst) { }
@@ -294,8 +288,6 @@
int security_token_id_;
};
-} // namespace
-
void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
FilteredCloneCallback cb(root_, security_token_id);
@@ -309,8 +301,6 @@
}
-namespace {
-
class Position {
public:
explicit Position(ProfileNode* node)
@@ -328,8 +318,6 @@
int child_idx_;
};
-} // namespace
-
// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
@@ -355,8 +343,6 @@
}
-namespace {
-
class CalculateTotalTicksCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
@@ -370,8 +356,6 @@
}
};
-} // namespace
-
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
@@ -877,6 +861,11 @@
}
+void HeapEntry::SetUnidirAutoIndexReference(HeapEntry* entry) {
+ children_.Add(new HeapGraphEdge(next_auto_index_++, this, entry));
+}
+
+
int HeapEntry::TotalSize() {
return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
}
@@ -888,12 +877,12 @@
}
-int HeapEntry::CalculateTotalSize() {
- snapshot_->ClearPaint();
+template<class Visitor>
+void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
list.Add(this);
- total_size_ = self_size_;
this->PaintReachable();
+ visitor->Apply(this);
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
@@ -902,15 +891,48 @@
if (!child->painted_reachable()) {
list.Add(child);
child->PaintReachable();
- total_size_ += child->self_size_;
+ visitor->Apply(child);
}
}
}
- return total_size_;
}
-namespace {
+class NullClass {
+ public:
+ void Apply(HeapEntry* entry) { }
+};
+
+void HeapEntry::PaintAllReachable() {
+ NullClass null;
+ ApplyAndPaintAllReachable(&null);
+}
+
+
+class TotalSizeCalculator {
+ public:
+ TotalSizeCalculator()
+ : total_size_(0) {
+ }
+
+ int total_size() const { return total_size_; }
+
+ void Apply(HeapEntry* entry) {
+ total_size_ += entry->self_size();
+ }
+
+ private:
+ int total_size_;
+};
+
+int HeapEntry::CalculateTotalSize() {
+ snapshot_->ClearPaint();
+ TotalSizeCalculator calc;
+ ApplyAndPaintAllReachable(&calc);
+ total_size_ = calc.total_size();
+ return total_size_;
+}
+
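The refactoring above folds reachability marking and accumulation into a single traversal that threads a visitor through the paint pass. A simplified model of the pattern; the node type and visitor are illustrative, not V8's HeapEntry:

```cpp
#include <vector>

struct Node {
  int self_size;
  bool painted = false;
  std::vector<Node*> children;
};

// One DFS both paints reachable nodes and lets a visitor accumulate over
// them, mirroring ApplyAndPaintAllReachable above.
template <class Visitor>
void ApplyAndPaintAllReachable(Node* root, Visitor* visitor) {
  std::vector<Node*> stack;
  stack.push_back(root);
  root->painted = true;
  visitor->Apply(root);
  while (!stack.empty()) {
    Node* node = stack.back();
    stack.pop_back();
    for (Node* child : node->children) {
      if (!child->painted) {
        stack.push_back(child);
        child->painted = true;
        visitor->Apply(child);
      }
    }
  }
}

// The total-size calculation then reduces to a trivial visitor:
struct TotalSize {
  int total = 0;
  void Apply(Node* n) { total += n->self_size; }
};
```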
class NonSharedSizeCalculator {
public:
@@ -930,41 +952,26 @@
int non_shared_total_size_;
};
-} // namespace
-
int HeapEntry::CalculateNonSharedTotalSize() {
// To calculate non-shared total size, first we paint all reachable
// nodes in one color, then we paint all nodes reachable from other
// nodes with a different color. Then we consider only nodes painted
- // with the first color for caclulating the total size.
+ // with the first color for calculating the total size.
snapshot_->ClearPaint();
+ PaintAllReachable();
+
List<HeapEntry*> list(10);
- list.Add(this);
- this->PaintReachable();
+ if (this != snapshot_->root()) {
+ list.Add(snapshot_->root());
+ snapshot_->root()->PaintReachableFromOthers();
+ }
while (!list.is_empty()) {
HeapEntry* entry = list.RemoveLast();
const int children_count = entry->children_.length();
for (int i = 0; i < children_count; ++i) {
HeapEntry* child = entry->children_[i]->to();
- if (!child->painted_reachable()) {
- list.Add(child);
- child->PaintReachable();
- }
- }
- }
-
- List<HeapEntry*> list2(10);
- if (this != snapshot_->root()) {
- list2.Add(snapshot_->root());
- snapshot_->root()->PaintReachableFromOthers();
- }
- while (!list2.is_empty()) {
- HeapEntry* entry = list2.RemoveLast();
- const int children_count = entry->children_.length();
- for (int i = 0; i < children_count; ++i) {
- HeapEntry* child = entry->children_[i]->to();
if (child != this && child->not_painted_reachable_from_others()) {
- list2.Add(child);
+ list.Add(child);
child->PaintReachableFromOthers();
}
}
@@ -972,7 +979,8 @@
NonSharedSizeCalculator calculator;
snapshot_->IterateEntries(&calculator);
- return calculator.non_shared_total_size();
+ non_shared_total_size_ = calculator.non_shared_total_size();
+ return non_shared_total_size_;
}
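A condensed model of the two-pass painting described in the comment above, with simplified types: entries still carrying the first color after the second pass are reachable only through the entry of interest, so only they contribute to its non-shared size. This assumes all entries start unpainted, as after ClearPaint:

```cpp
#include <cstddef>
#include <vector>

enum Paint { kUnpainted, kReachable, kReachableFromOthers };

struct Entry {
  int self_size;
  Paint paint;
  std::vector<Entry*> children;
};

static void PaintFrom(Entry* start, Paint color, Entry* skip) {
  std::vector<Entry*> stack;
  stack.push_back(start);
  start->paint = color;
  while (!stack.empty()) {
    Entry* e = stack.back();
    stack.pop_back();
    for (size_t i = 0; i < e->children.size(); ++i) {
      Entry* child = e->children[i];
      if (child != skip && child->paint != color) {
        child->paint = color;
        stack.push_back(child);
      }
    }
  }
}

// First paint everything reachable from 'entry', then repaint everything
// reachable from the root without passing through 'entry'.
int NonSharedTotalSize(const std::vector<Entry*>& all_entries,
                       Entry* root, Entry* entry) {
  PaintFrom(entry, kReachable, NULL);
  if (entry != root) PaintFrom(root, kReachableFromOthers, entry);
  int total = 0;
  for (size_t i = 0; i < all_entries.size(); ++i) {
    if (all_entries[i]->paint == kReachable) total += all_entries[i]->self_size;
  }
  return total;
}
```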
@@ -1078,7 +1086,8 @@
void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
+ OS::Print("%6d %6d %6d [%ld] ",
+ self_size_, TotalSize(), NonSharedTotalSize(), id_);
if (type_ != STRING) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1244,7 +1253,13 @@
: collection_(collection),
title_(title),
uid_(uid),
- root_(this) {
+ root_(this),
+ sorted_entries_(NULL) {
+}
+
+
+HeapSnapshot::~HeapSnapshot() {
+ delete sorted_entries_;
}
@@ -1355,6 +1370,7 @@
HeapEntry* entry = new HeapEntry(this,
type,
name,
+ collection_->GetObjectId(object->address()),
GetObjectSize(object),
GetObjectSecurityToken(object));
entries_.Pair(object, entry);
@@ -1381,8 +1397,6 @@
}
-namespace {
-
class EdgesCutter {
public:
explicit EdgesCutter(int global_security_token)
@@ -1400,8 +1414,6 @@
const int global_security_token_;
};
-} // namespace
-
void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
EdgesCutter cutter(GetGlobalSecurityToken());
entries_.Apply(&cutter);
@@ -1454,13 +1466,129 @@
}
+class EntriesCollector {
+ public:
+ explicit EntriesCollector(List<HeapEntry*>* list) : list_(list) { }
+ void Apply(HeapEntry* entry) {
+ list_->Add(entry);
+ }
+ private:
+ List<HeapEntry*>* list_;
+};
+
+template<class T>
+static int SortByIds(const T* entry1_ptr,
+ const T* entry2_ptr) {
+ if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
+ return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
+}
+
+List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
+ if (sorted_entries_ != NULL) return sorted_entries_;
+ sorted_entries_ = new List<HeapEntry*>(entries_.capacity());
+ EntriesCollector collector(sorted_entries_);
+ entries_.Apply(&collector);
+ sorted_entries_->Sort(SortByIds);
+ return sorted_entries_;
+}
+
+
+HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
+ return collection_->CompareSnapshots(this, snapshot);
+}
+
+
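Snapshot comparison (the comparator is defined near the end of this change) reduces to a lockstep walk of the two id-sorted entry lists. A minimal sketch of that merge step over bare id vectors:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Ids present only in 'before' belong to deleted objects; ids present only
// in 'after' belong to added ones. Both inputs must be sorted by id.
void DiffSortedIds(const std::vector<uint64_t>& before,
                   const std::vector<uint64_t>& after,
                   std::vector<uint64_t>* deleted,
                   std::vector<uint64_t>* added) {
  size_t i = 0, j = 0;
  while (i < before.size() && j < after.size()) {
    if (before[i] == after[j]) {
      ++i;
      ++j;
    } else if (before[i] < after[j]) {
      deleted->push_back(before[i++]);
    } else {
      added->push_back(after[j++]);
    }
  }
  while (i < before.size()) deleted->push_back(before[i++]);
  while (j < after.size()) added->push_back(after[j++]);
}
```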
void HeapSnapshot::Print(int max_depth) {
root_.Print(max_depth, 0);
}
+HeapObjectsMap::HeapObjectsMap()
+ : initial_fill_mode_(true),
+ next_id_(1),
+ entries_map_(AddressesMatch),
+ entries_(new List<EntryInfo>()) { }
+
+
+HeapObjectsMap::~HeapObjectsMap() {
+ delete entries_;
+}
+
+
+void HeapObjectsMap::SnapshotGenerationFinished() {
+ initial_fill_mode_ = false;
+ RemoveDeadEntries();
+}
+
+
+uint64_t HeapObjectsMap::FindObject(Address addr) {
+ if (!initial_fill_mode_) {
+ uint64_t existing = FindEntry(addr);
+ if (existing != 0) return existing;
+ }
+ uint64_t id = next_id_++;
+ AddEntry(addr, id);
+ return id;
+}
+
+
+void HeapObjectsMap::MoveObject(Address from, Address to) {
+ if (from == to) return;
+ HashMap::Entry* entry = entries_map_.Lookup(from, AddressHash(from), false);
+ if (entry != NULL) {
+ void* value = entry->value;
+ entries_map_.Remove(from, AddressHash(from));
+ entry = entries_map_.Lookup(to, AddressHash(to), true);
+ // There may already be an entry at the new location; that is OK, as the
+ // GC can overwrite dead objects with live objects being moved.
+ entry->value = value;
+ }
+}
+
+
+void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
+ ASSERT(entry->value == NULL);
+ entry->value = reinterpret_cast<void*>(entries_->length());
+ entries_->Add(EntryInfo(id));
+}
+
+
+uint64_t HeapObjectsMap::FindEntry(Address addr) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
+ if (entry != NULL) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_->at(entry_index);
+ entry_info.accessed = true;
+ return entry_info.id;
+ } else {
+ return 0;
+ }
+}
+
+
+void HeapObjectsMap::RemoveDeadEntries() {
+ List<EntryInfo>* new_entries = new List<EntryInfo>();
+ for (HashMap::Entry* entry = entries_map_.Start();
+ entry != NULL;
+ entry = entries_map_.Next(entry)) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_->at(entry_index);
+ if (entry_info.accessed) {
+ entry->value = reinterpret_cast<void*>(new_entries->length());
+ new_entries->Add(EntryInfo(entry_info.id, false));
+ }
+ }
+ delete entries_;
+ entries_ = new_entries;
+}
+
+
HeapSnapshotsCollection::HeapSnapshotsCollection()
- : snapshots_uids_(HeapSnapshotsMatch),
+ : is_tracking_objects_(false),
+ snapshots_uids_(HeapSnapshotsMatch),
token_enumerator_(new TokenEnumerator()) {
}
@@ -1478,6 +1606,7 @@
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
+ is_tracking_objects_ = true; // Start watching for heap object moves.
HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
snapshots_.Add(snapshot);
HashMap::Entry* entry =
@@ -1498,6 +1627,13 @@
}
+HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
+ HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2) {
+ return comparator_.Compare(snapshot1, snapshot2);
+}
+
+
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
: snapshot_(snapshot) {
}
@@ -1555,13 +1691,13 @@
JSFunction* func = JSFunction::cast(js_obj);
Context* context = func->context();
ZoneScope zscope(DELETE_ON_EXIT);
- ScopeInfo<ZoneListAllocationPolicy> scope_info(
- context->closure()->shared()->code());
- int locals_number = scope_info.NumberOfLocals();
+ SerializedScopeInfo* serialized_scope_info =
+ context->closure()->shared()->scope_info();
+ ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
+ int locals_number = zone_scope_info.NumberOfLocals();
for (int i = 0; i < locals_number; ++i) {
- String* local_name = *scope_info.LocalName(i);
- int idx = ScopeInfo<>::ContextSlotIndex(
- context->closure()->shared()->code(), local_name, NULL);
+ String* local_name = *zone_scope_info.LocalName(i);
+ int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
if (idx >= 0 && idx < context->length()) {
snapshot_->SetClosureReference(entry, local_name, context->get(idx));
}
@@ -1630,6 +1766,64 @@
}
}
+
+static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
+ delete *diff_ptr;
+}
+
+HeapSnapshotsComparator::~HeapSnapshotsComparator() {
+ diffs_.Iterate(DeleteHeapSnapshotsDiff);
+}
+
+
+HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2) {
+ HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
+ diffs_.Add(diff);
+ List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
+ List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
+ int i = 0, j = 0;
+ List<HeapEntry*> added_entries, deleted_entries;
+ while (i < entries1->length() && j < entries2->length()) {
+ uint64_t id1 = entries1->at(i)->id();
+ uint64_t id2 = entries2->at(j)->id();
+ if (id1 == id2) {
+ i++;
+ j++;
+ } else if (id1 < id2) {
+ HeapEntry* entry = entries1->at(i++);
+ deleted_entries.Add(entry);
+ } else {
+ HeapEntry* entry = entries2->at(j++);
+ added_entries.Add(entry);
+ }
+ }
+ while (i < entries1->length()) {
+ HeapEntry* entry = entries1->at(i++);
+ deleted_entries.Add(entry);
+ }
+ while (j < entries2->length()) {
+ HeapEntry* entry = entries2->at(j++);
+ added_entries.Add(entry);
+ }
+
+ snapshot1->ClearPaint();
+ snapshot1->root()->PaintAllReachable();
+ for (int i = 0; i < deleted_entries.length(); ++i) {
+ HeapEntry* entry = deleted_entries[i];
+ if (entry->painted_reachable())
+ diff->AddDeletedEntry(entry);
+ }
+ snapshot2->ClearPaint();
+ snapshot2->root()->PaintAllReachable();
+ for (int i = 0; i < added_entries.length(); ++i) {
+ HeapEntry* entry = added_entries[i];
+ if (entry->painted_reachable())
+ diff->AddAddedEntry(entry);
+ }
+ return diff;
+}
+
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
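
The snapshot comparison above boils down to a linear merge of two id-sorted entry lists. A minimal standalone sketch of that idea, using plain uint64_t ids in place of HeapEntry* (all names below are illustrative, not part of the V8 API):

#include <stdint.h>
#include <vector>

// Sketch of the id-merge performed by HeapSnapshotsComparator::Compare.
static void DiffSortedIds(const std::vector<uint64_t>& old_ids,
                          const std::vector<uint64_t>& new_ids,
                          std::vector<uint64_t>* deleted,
                          std::vector<uint64_t>* added) {
  size_t i = 0, j = 0;
  while (i < old_ids.size() && j < new_ids.size()) {
    if (old_ids[i] == new_ids[j]) {
      i++;  // The object survived between the two snapshots.
      j++;
    } else if (old_ids[i] < new_ids[j]) {
      deleted->push_back(old_ids[i++]);  // Present only in the old snapshot.
    } else {
      added->push_back(new_ids[j++]);    // Present only in the new snapshot.
    }
  }
  while (i < old_ids.size()) deleted->push_back(old_ids[i++]);
  while (j < new_ids.size()) added->push_back(new_ids[j++]);
}

The production Compare then repaints each snapshot from its root and reports only entries that are still reachable, so unreachable leftovers never show up as additions or deletions.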
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 4e423c8..cd2bd0b 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -74,7 +74,7 @@
reinterpret_cast<char*>(key2)) == 0;
}
- // String::Hash -> const char*
+ // Maps strings, keyed by String::Hash, to const char* values.
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@@ -156,7 +156,7 @@
CodeEntry* entry_;
unsigned total_ticks_;
unsigned self_ticks_;
- // CodeEntry* -> ProfileNode*
+ // Mapping from CodeEntry* to ProfileNode*
HashMap children_;
List<ProfileNode*> children_list_;
@@ -312,11 +312,12 @@
}
StringsStorage function_and_resource_names_;
- // args_count -> char*
+ // Mapping from args_count (int) to char* strings.
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<List<CpuProfile*>* > profiles_by_token_;
- // uid -> index
+ // Mapping from profiles' uids to indexes in the second nested list
+ // of profiles_by_token_.
HashMap profiles_uids_;
// Accessed by VM thread and profile generator thread.
@@ -482,6 +483,7 @@
visited_(false),
type_(INTERNAL),
name_(""),
+ id_(0),
next_auto_index_(0),
self_size_(0),
security_token_id_(TokenEnumerator::kNoSecurityToken),
@@ -494,12 +496,14 @@
HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
+ uint64_t id,
int self_size,
int security_token_id)
: snapshot_(snapshot),
visited_(false),
type_(type),
name_(name),
+ id_(id),
next_auto_index_(1),
self_size_(self_size),
security_token_id_(security_token_id),
@@ -514,6 +518,7 @@
bool visited() const { return visited_; }
Type type() const { return type_; }
const char* name() const { return name_; }
+ uint64_t id() const { return id_; }
int self_size() const { return self_size_; }
int security_token_id() const { return security_token_id_; }
bool painted_reachable() { return painted_ == kPaintReachable; }
@@ -524,9 +529,13 @@
const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
const List<HeapGraphPath*>* GetRetainingPaths();
+ template<class Visitor>
+ void ApplyAndPaintAllReachable(Visitor* visitor);
+
void ClearPaint() { painted_ = kUnpainted; }
void CutEdges();
void MarkAsVisited() { visited_ = true; }
+ void PaintAllReachable();
void PaintReachable() {
ASSERT(painted_ == kUnpainted);
painted_ = kPaintReachable;
@@ -537,6 +546,7 @@
void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
+ void SetUnidirAutoIndexReference(HeapEntry* entry);
int TotalSize();
int NonSharedTotalSize();
@@ -557,6 +567,7 @@
bool visited_;
Type type_;
const char* name_;
+ uint64_t id_;
int next_auto_index_;
int self_size_;
int security_token_id_;
@@ -607,6 +618,8 @@
HeapEntry* Map(HeapObject* object);
void Pair(HeapObject* object, HeapEntry* entry);
+ uint32_t capacity() { return entries_.capacity(); }
+
private:
INLINE(uint32_t Hash(HeapObject* object)) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
@@ -627,6 +640,7 @@
class HeapSnapshotsCollection;
+class HeapSnapshotsDiff;
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapSnapshotsCollection, which is also a factory for
@@ -638,6 +652,7 @@
HeapSnapshot(HeapSnapshotsCollection* collection,
const char* title,
unsigned uid);
+ ~HeapSnapshot();
void ClearPaint();
void CutObjectsFromForeignSecurityContexts();
HeapEntry* GetEntry(Object* object);
@@ -655,6 +670,8 @@
HeapEntry* root() { return &root_; }
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
+ List<HeapEntry*>* GetSortedEntriesList();
+ HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
void Print(int max_depth);
@@ -679,19 +696,108 @@
const char* title_;
unsigned uid_;
HeapEntry root_;
- // HeapObject* -> HeapEntry*
+ // Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
+ // Entries sorted by id.
+ List<HeapEntry*>* sorted_entries_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
};
+class HeapObjectsMap {
+ public:
+ HeapObjectsMap();
+ ~HeapObjectsMap();
+
+ void SnapshotGenerationFinished();
+ uint64_t FindObject(Address addr);
+ void MoveObject(Address from, Address to);
+
+ private:
+ struct EntryInfo {
+ explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
+ EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
+ uint64_t id;
+ bool accessed;
+ };
+
+ void AddEntry(Address addr, uint64_t id);
+ uint64_t FindEntry(Address addr);
+ void RemoveDeadEntries();
+
+ static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t AddressHash(Address addr) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
+ }
+
+ bool initial_fill_mode_;
+ uint64_t next_id_;
+ HashMap entries_map_;
+ List<EntryInfo>* entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
+};
+
+
+class HeapSnapshotsDiff {
+ public:
+ HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
+ : snapshot1_(snapshot1),
+ snapshot2_(snapshot2),
+ additions_root_(new HeapEntry(snapshot2)),
+ deletions_root_(new HeapEntry(snapshot1)) { }
+
+ ~HeapSnapshotsDiff() {
+ delete deletions_root_;
+ delete additions_root_;
+ }
+
+ void AddAddedEntry(HeapEntry* entry) {
+ additions_root_->SetUnidirAutoIndexReference(entry);
+ }
+
+ void AddDeletedEntry(HeapEntry* entry) {
+ deletions_root_->SetUnidirAutoIndexReference(entry);
+ }
+
+ const HeapEntry* additions_root() const { return additions_root_; }
+ const HeapEntry* deletions_root() const { return deletions_root_; }
+
+ private:
+ HeapSnapshot* snapshot1_;
+ HeapSnapshot* snapshot2_;
+ HeapEntry* additions_root_;
+ HeapEntry* deletions_root_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
+};
+
+
+class HeapSnapshotsComparator {
+ public:
+ HeapSnapshotsComparator() { }
+ ~HeapSnapshotsComparator();
+ HeapSnapshotsDiff* Compare(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2);
+ private:
+ List<HeapSnapshotsDiff*> diffs_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsComparator);
+};
+
+
class HeapSnapshotsCollection {
public:
HeapSnapshotsCollection();
~HeapSnapshotsCollection();
+ bool is_tracking_objects() { return is_tracking_objects_; }
+
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@@ -699,16 +805,26 @@
TokenEnumerator* token_enumerator() { return token_enumerator_; }
+ uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
+ void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+
+ HeapSnapshotsDiff* CompareSnapshots(HeapSnapshot* snapshot1,
+ HeapSnapshot* snapshot2);
+
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
return key1 == key2;
}
+ bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
- // uid -> HeapSnapshot*
+ // Mapping from snapshots' uids to HeapSnapshot* pointers.
HashMap snapshots_uids_;
StringsStorage names_;
TokenEnumerator* token_enumerator_;
+ // Mapping from HeapObject addresses to objects' uids.
+ HeapObjectsMap ids_;
+ HeapSnapshotsComparator comparator_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
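
HeapObjectsMap is what makes snapshot diffing possible: it hands out a stable uint64_t id per object and rekeys the entry whenever the GC moves the object. A compact model of that contract, with std::unordered_map standing in for v8::internal::HashMap (class and method names here are hypothetical):

#include <stdint.h>
#include <unordered_map>

class ObjectIdTracker {
 public:
  uint64_t FindOrAssign(uintptr_t addr) {
    auto it = ids_.find(addr);
    if (it != ids_.end()) return it->second;  // Known object keeps its id.
    return ids_[addr] = next_id_++;           // New object gets a fresh id.
  }
  void OnMove(uintptr_t from, uintptr_t to) {
    auto it = ids_.find(from);
    if (it == ids_.end()) return;
    uint64_t id = it->second;
    ids_.erase(it);
    ids_[to] = id;  // The target may hold a dead object's entry; overwrite it.
  }
 private:
  uint64_t next_id_ = 1;
  std::unordered_map<uintptr_t, uint64_t> ids_;
};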
diff --git a/src/runtime.cc b/src/runtime.cc
index 4a0fe7a..fa881eb 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1606,9 +1606,10 @@
if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- // Set the code, formal parameter count, and the length of the target
- // function.
+ // Set the code, scope info, formal parameter count,
+ // and the length of the target function.
target->set_code(fun->code());
+ target->shared()->set_scope_info(shared->scope_info());
target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count(
shared->formal_parameter_count());
@@ -5608,6 +5609,14 @@
}
+static Object* Runtime_NumberAlloc(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 0);
+
+ return Heap::NumberFromDouble(9876543210.0);
+}
+
+
static Object* Runtime_NumberDiv(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -6860,7 +6869,7 @@
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, function, args[0]);
- int length = ScopeInfo<>::NumberOfContextSlots(function->code());
+ int length = function->shared()->scope_info()->NumberOfContextSlots();
Object* result = Heap::AllocateFunctionContext(length, function);
if (result->IsFailure()) return result;
@@ -8480,9 +8489,10 @@
// Check for constructor frame.
bool constructor = it.frame()->IsConstructor();
- // Get code and read scope info from it for local variable information.
- Handle<Code> code(it.frame()->code());
- ScopeInfo<> info(*code);
+ // Get scope info and read from it for local variable information.
+ Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
+ Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ ScopeInfo<> info(*scope_info);
// Get the context.
Handle<Context> context(Context::cast(it.frame()->context()));
@@ -8510,8 +8520,7 @@
}
ASSERT(context->is_function_context());
locals->set(i * 2 + 1,
- context->get(ScopeInfo<>::ContextSlotIndex(*code, *name,
- NULL)));
+ context->get(scope_info->ContextSlotIndex(*name, NULL)));
}
}
@@ -8651,18 +8660,17 @@
// Copy all the context locals into an object used to materialize a scope.
-static void CopyContextLocalsToScopeObject(Handle<Code> code,
- ScopeInfo<>& scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
+static void CopyContextLocalsToScopeObject(
+ Handle<SerializedScopeInfo> serialized_scope_info,
+ ScopeInfo<>& scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
// Fill all context locals to the context extension.
for (int i = Context::MIN_CONTEXT_SLOTS;
i < scope_info.number_of_context_slots();
i++) {
- int context_index =
- ScopeInfo<>::ContextSlotIndex(*code,
- *scope_info.context_slot_name(i),
- NULL);
+ int context_index = serialized_scope_info->ContextSlotIndex(
+ *scope_info.context_slot_name(i), NULL);
// Don't include the arguments shadow (.arguments) context variable.
if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
@@ -8678,8 +8686,9 @@
// frame.
static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<Code> code(function->code());
- ScopeInfo<> scope_info(*code);
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+ ScopeInfo<> scope_info(*serialized_scope_info);
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
@@ -8702,7 +8711,7 @@
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
- CopyContextLocalsToScopeObject(code, scope_info,
+ CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
function_context, local_scope);
// Finally copy any properties from the function context extension. This will
@@ -8729,8 +8738,9 @@
static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
ASSERT(context->is_function_context());
- Handle<Code> code(context->closure()->code());
- ScopeInfo<> scope_info(*code);
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+ ScopeInfo<> scope_info(*serialized_scope_info);
// Allocate and initialize a JSObject with all the content of this function
// closure.
@@ -8738,9 +8748,8 @@
// Check whether the arguments shadow object exists.
int arguments_shadow_index =
- ScopeInfo<>::ContextSlotIndex(*code,
- Heap::arguments_shadow_symbol(),
- NULL);
+ shared->scope_info()->ContextSlotIndex(Heap::arguments_shadow_symbol(),
+ NULL);
if (arguments_shadow_index >= 0) {
// In this case all the arguments are available in the arguments shadow
// object.
@@ -8754,7 +8763,8 @@
}
// Fill all context locals to the context extension.
- CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
+ CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ context, closure_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
@@ -8803,8 +8813,8 @@
// created for evaluating top level code and it is not a real local scope.
// Checking for the existence of .result seems fragile, but the scope info
// saved with the code object does not otherwise have that information.
- Handle<Code> code(function_->code());
- int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
+ int index = function_->shared()->scope_info()->
+ StackSlotIndex(Heap::result_symbol());
at_local_ = index < 0;
} else if (context_->is_function_context()) {
at_local_ = true;
@@ -8918,8 +8928,7 @@
case ScopeIterator::ScopeTypeLocal: {
PrintF("Local:\n");
- Handle<Code> code(function_->code());
- ScopeInfo<> scope_info(*code);
+ ScopeInfo<> scope_info(function_->shared()->scope_info());
scope_info.Print();
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
@@ -9443,7 +9452,7 @@
// Runtime_DebugEvaluate.
static Handle<Object> GetArgumentsObject(JavaScriptFrame* frame,
Handle<JSFunction> function,
- Handle<Code> code,
+ Handle<SerializedScopeInfo> scope_info,
const ScopeInfo<>* sinfo,
Handle<Context> function_context) {
// Try to find the value of 'arguments' to pass as parameter. If it is not
@@ -9451,15 +9460,14 @@
// does not support eval) then create an 'arguments' object.
int index;
if (sinfo->number_of_stack_slots() > 0) {
- index = ScopeInfo<>::StackSlotIndex(*code, Heap::arguments_symbol());
+ index = scope_info->StackSlotIndex(Heap::arguments_symbol());
if (index != -1) {
return Handle<Object>(frame->GetExpression(index));
}
}
if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
- index = ScopeInfo<>::ContextSlotIndex(*code, Heap::arguments_symbol(),
- NULL);
+ index = scope_info->ContextSlotIndex(Heap::arguments_symbol(), NULL);
if (index != -1) {
return Handle<Object>(function_context->get(index));
}
@@ -9510,8 +9518,8 @@
JavaScriptFrameIterator it(id);
JavaScriptFrame* frame = it.frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<Code> code(function->code());
- ScopeInfo<> sinfo(*code);
+ Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ ScopeInfo<> sinfo(*scope_info);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -9533,7 +9541,7 @@
Factory::NewFunction(Factory::empty_string(), Factory::undefined_value());
go_between->set_context(function->context());
#ifdef DEBUG
- ScopeInfo<> go_between_sinfo(go_between->shared()->code());
+ ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
ASSERT(go_between_sinfo.number_of_parameters() == 0);
ASSERT(go_between_sinfo.number_of_context_slots() == 0);
#endif
@@ -9579,8 +9587,8 @@
&has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- Handle<Object> arguments = GetArgumentsObject(frame, function, code, &sinfo,
- function_context);
+ Handle<Object> arguments = GetArgumentsObject(frame, function, scope_info,
+ &sinfo, function_context);
// Invoke the evaluation function and return the result.
const int argc = 2;
diff --git a/src/runtime.h b/src/runtime.h
index 5719fc8..1c9bb08 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -115,6 +115,7 @@
F(NumberDiv, 2, 1) \
F(NumberMod, 2, 1) \
F(NumberUnaryMinus, 1, 1) \
+ F(NumberAlloc, 0, 1) \
\
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index ab6e3e9..aca1945 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -80,7 +80,7 @@
} else {
// x is not a number, boolean, null or undefined.
if (y == null) return 1; // not equal
- if (IS_SPEC_OBJECT_OR_NULL(y)) {
+ if (IS_SPEC_OBJECT(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
@@ -345,7 +345,7 @@
// ECMA-262, section 11.8.7, page 54.
function IN(x) {
- if (x == null || !IS_SPEC_OBJECT_OR_NULL(x)) {
+ if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
@@ -363,13 +363,13 @@
}
// If V is not an object, return false.
- if (IS_NULL(V) || !IS_SPEC_OBJECT_OR_NULL(V)) {
+ if (!IS_SPEC_OBJECT(V)) {
return 1;
}
// Get the prototype of F; if it is not an object, throw an error.
var O = F.prototype;
- if (IS_NULL(O) || !IS_SPEC_OBJECT_OR_NULL(O)) {
+ if (!IS_SPEC_OBJECT(O)) {
throw %MakeTypeError('instanceof_nonobject_proto', [O]);
}
@@ -431,7 +431,7 @@
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
if (length > 0x800000) {
- throw %MakeRangeError('apply_overflow', [length]);
+ throw %MakeRangeError('stack_overflow', []);
}
if (!IS_FUNCTION(this)) {
@@ -450,7 +450,7 @@
function APPLY_OVERFLOW(length) {
- throw %MakeRangeError('apply_overflow', [length]);
+ throw %MakeRangeError('stack_overflow', []);
}
@@ -483,8 +483,7 @@
// Fast case check.
if (IS_STRING(x)) return x;
// Normal behavior.
- if (!IS_SPEC_OBJECT_OR_NULL(x)) return x;
- if (x == null) return x; // check for null, undefined
+ if (!IS_SPEC_OBJECT(x)) return x;
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -583,13 +582,10 @@
// Returns if the given x is a primitive value - not an object or a
// function.
function IsPrimitive(x) {
- if (!IS_SPEC_OBJECT_OR_NULL(x)) {
- return true;
- } else {
- // Even though the type of null is "object", null is still
- // considered a primitive value.
- return IS_NULL(x);
- }
+ // Even though the type of null is "object", null is still
+ // considered a primitive value. IS_SPEC_OBJECT handles this correctly
+ // (i.e., it will return false if x is null).
+ return !IS_SPEC_OBJECT(x);
}
diff --git a/src/scanner.cc b/src/scanner.cc
index 286f515..ca0e2d8 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -341,8 +341,7 @@
void Scanner::Initialize(Handle<String> source,
ParserLanguage language) {
- safe_string_input_buffer_.Reset(source.location());
- Init(source, &safe_string_input_buffer_, 0, source->length(), language);
+ Init(source, NULL, 0, source->length(), language);
}
@@ -357,9 +356,7 @@
int start_position,
int end_position,
ParserLanguage language) {
- safe_string_input_buffer_.Reset(source.location());
- Init(source, &safe_string_input_buffer_,
- start_position, end_position, language);
+ Init(source, NULL, start_position, end_position, language);
}
@@ -368,6 +365,10 @@
int start_position,
int end_position,
ParserLanguage language) {
+ // Either initialize the scanner from a character stream or from a
+ // string.
+ ASSERT(source.is_null() || stream == NULL);
+
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
@@ -382,6 +383,10 @@
end_position);
source_ = &ascii_string_buffer_;
} else {
+ if (!source.is_null()) {
+ safe_string_input_buffer_.Reset(source.location());
+ stream = &safe_string_input_buffer_;
+ }
char_stream_buffer_.Initialize(source,
stream,
start_position,
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 2091ca7..7e7f152 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -148,7 +148,7 @@
}
-// Encoding format in the Code object:
+// Encoding format in a FixedArray object:
//
// - function name
//
@@ -204,12 +204,6 @@
}
-static inline Object** ReadSentinel(Object** p) {
- ASSERT(*p == NULL);
- return p + 1;
-}
-
-
template <class Allocator>
static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
ASSERT(list->is_empty());
@@ -220,7 +214,7 @@
p = ReadSymbol(p, &s);
list->Add(s);
}
- return ReadSentinel(p);
+ return p;
}
@@ -239,27 +233,27 @@
list->Add(s);
modes->Add(static_cast<Variable::Mode>(m));
}
- return ReadSentinel(p);
+ return p;
}
template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Code* code)
+ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
: function_name_(Factory::empty_symbol()),
parameters_(4),
stack_slots_(8),
context_slots_(8),
context_modes_(8) {
- if (code == NULL || code->sinfo_size() == 0) return;
-
- Object** p0 = &Memory::Object_at(code->sinfo_start());
- Object** p = p0;
- p = ReadSymbol(p, &function_name_);
- p = ReadBool(p, &calls_eval_);
- p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
- p = ReadList<Allocator>(p, &parameters_);
- p = ReadList<Allocator>(p, &stack_slots_);
- ASSERT((p - p0) * kPointerSize == code->sinfo_size());
+ if (data->length() > 0) {
+ Object** p0 = data->data_start();
+ Object** p = p0;
+ p = ReadSymbol(p, &function_name_);
+ p = ReadBool(p, &calls_eval_);
+ p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
+ p = ReadList<Allocator>(p, &parameters_);
+ p = ReadList<Allocator>(p, &stack_slots_);
+ ASSERT((p - p0) == FixedArray::cast(data)->length());
+ }
}
@@ -281,12 +275,6 @@
}
-static inline Object** WriteSentinel(Object** p) {
- *p++ = NULL;
- return p;
-}
-
-
template <class Allocator>
static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
const int n = list->length();
@@ -294,7 +282,7 @@
for (int i = 0; i < n; i++) {
p = WriteSymbol(p, list->at(i));
}
- return WriteSentinel(p);
+ return p;
}
@@ -308,222 +296,41 @@
p = WriteSymbol(p, list->at(i));
p = WriteInt(p, modes->at(i));
}
- return WriteSentinel(p);
+ return p;
}
template<class Allocator>
-int ScopeInfo<Allocator>::Serialize(Code* code) {
- // function name, calls eval, length & sentinel for 3 tables:
- const int extra_slots = 1 + 1 + 2 * 3;
- int size = (extra_slots +
- context_slots_.length() * 2 +
- parameters_.length() +
- stack_slots_.length()) * kPointerSize;
+Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
+ // function name, calls eval, length for 3 tables:
+ const int extra_slots = 1 + 1 + 3;
+ int length = extra_slots +
+ context_slots_.length() * 2 +
+ parameters_.length() +
+ stack_slots_.length();
- if (code != NULL) {
- CHECK(code->sinfo_size() == size);
- Object** p0 = &Memory::Object_at(code->sinfo_start());
- Object** p = p0;
- p = WriteSymbol(p, function_name_);
- p = WriteBool(p, calls_eval_);
- p = WriteList(p, &context_slots_, &context_modes_);
- p = WriteList(p, &parameters_);
- p = WriteList(p, &stack_slots_);
- ASSERT((p - p0) * kPointerSize == size);
- }
+ Handle<SerializedScopeInfo> data(
+ SerializedScopeInfo::cast(*Factory::NewFixedArray(length, TENURED)));
+ AssertNoAllocation nogc;
- return size;
-}
+ Object** p0 = data->data_start();
+ Object** p = p0;
+ p = WriteSymbol(p, function_name_);
+ p = WriteBool(p, calls_eval_);
+ p = WriteList(p, &context_slots_, &context_modes_);
+ p = WriteList(p, &parameters_);
+ p = WriteList(p, &stack_slots_);
+ ASSERT((p - p0) == length);
-
-template<class Allocator>
-void ScopeInfo<Allocator>::IterateScopeInfo(Code* code, ObjectVisitor* v) {
- Object** start = &Memory::Object_at(code->sinfo_start());
- Object** end = &Memory::Object_at(code->sinfo_start() + code->sinfo_size());
- v->VisitPointers(start, end);
-}
-
-
-static Object** ContextEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- // +2 for function name and calls eval:
- return &Memory::Object_at(code->sinfo_start()) + 2;
-}
-
-
-static Object** ParameterEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- p = ReadInt(p, &n);
- return p + n*2 + 1; // *2 for pairs, +1 for sentinel
-}
-
-
-static Object** StackSlotEntriesAddr(Code* code) {
- ASSERT(code->sinfo_size() > 0);
- Object** p = ParameterEntriesAddr(code);
- int n; // number of parameter slots;
- p = ReadInt(p, &n);
- return p + n + 1; // +1 for sentinel
-}
-
-
-template<class Allocator>
-bool ScopeInfo<Allocator>::CallsEval(Code* code) {
- if (code->sinfo_size() > 0) {
- // +1 for function name:
- Object** p = &Memory::Object_at(code->sinfo_start()) + 1;
- bool calls_eval;
- p = ReadBool(p, &calls_eval);
- return calls_eval;
- }
- return true;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfStackSlots(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = StackSlotEntriesAddr(code);
- int n; // number of stack slots;
- ReadInt(p, &n);
- return n;
- }
- return 0;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfContextSlots(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- ReadInt(p, &n);
- return n + Context::MIN_CONTEXT_SLOTS;
- }
- return 0;
-}
-
-
-template<class Allocator>
-bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) {
- if (code->sinfo_size() > 0) {
- Object** p = ContextEntriesAddr(code);
- int n; // number of context slots;
- ReadInt(p, &n);
- return n > 0;
- }
- return false;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
- ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
- // Loop below depends on the NULL sentinel after the stack slot names.
- ASSERT(NumberOfStackSlots(code) > 0 ||
- *(StackSlotEntriesAddr(code) + 1) == NULL);
- // slots start after length entry
- Object** p0 = StackSlotEntriesAddr(code) + 1;
- Object** p = p0;
- while (*p != NULL) {
- if (*p == name) return static_cast<int>(p - p0);
- p++;
- }
- }
- return -1;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
- String* name,
- Variable::Mode* mode) {
- ASSERT(name->IsSymbol());
- int result = ContextSlotCache::Lookup(code, name, mode);
- if (result != ContextSlotCache::kNotFound) return result;
- if (code->sinfo_size() > 0) {
- // Loop below depends on the NULL sentinel after the context slot names.
- ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
- *(ContextEntriesAddr(code) + 1) == NULL);
-
- // slots start after length entry
- Object** p0 = ContextEntriesAddr(code) + 1;
- Object** p = p0;
- // contexts may have no variable slots (in the presence of eval()).
- while (*p != NULL) {
- if (*p == name) {
- ASSERT(((p - p0) & 1) == 0);
- int v;
- ReadInt(p + 1, &v);
- Variable::Mode mode_value = static_cast<Variable::Mode>(v);
- if (mode != NULL) *mode = mode_value;
- result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
- ContextSlotCache::Update(code, name, mode_value, result);
- return result;
- }
- p += 2;
- }
- }
- ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
- return -1;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) {
- ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
- // We must read parameters from the end since for
- // multiply declared parameters the value of the
- // last declaration of that parameter is used
- // inside a function (and thus we need to look
- // at the last index). Was bug# 1110337.
- //
- // Eventually, we should only register such parameters
- // once, with corresponding index. This requires a new
- // implementation of the ScopeInfo code. See also other
- // comments in this file regarding this.
- Object** p = ParameterEntriesAddr(code);
- int n; // number of parameters
- Object** p0 = ReadInt(p, &n);
- p = p0 + n;
- while (p > p0) {
- p--;
- if (*p == name) return static_cast<int>(p - p0);
- }
- }
- return -1;
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::FunctionContextSlotIndex(Code* code, String* name) {
- ASSERT(name->IsSymbol());
- if (code->sinfo_size() > 0) {
- Object** p = &Memory::Object_at(code->sinfo_start());
- if (*p == name) {
- p = ContextEntriesAddr(code);
- int n; // number of context slots
- ReadInt(p, &n);
- ASSERT(n != 0);
- // The function context slot is the last entry.
- return n + Context::MIN_CONTEXT_SLOTS - 1;
- }
- }
- return -1;
+ return data;
}
template<class Allocator>
Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
// A local variable can be allocated either on the stack or in the context.
- // For variables allocated in the context they are always preceded by the
- // number Context::MIN_CONTEXT_SLOTS number of fixed allocated slots in the
- // context.
+ // Variables allocated in the context are always preceded by the
+ // Context::MIN_CONTEXT_SLOTS fixed slots in the context.
if (i < number_of_stack_slots()) {
return stack_slot_name(i);
} else {
@@ -544,20 +351,189 @@
}
-int ContextSlotCache::Hash(Code* code, String* name) {
+Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
+ ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
+ return sinfo.Serialize();
+}
+
+
+SerializedScopeInfo* SerializedScopeInfo::Empty() {
+ return reinterpret_cast<SerializedScopeInfo*>(Heap::empty_fixed_array());
+}
+
+
+Object** SerializedScopeInfo::ContextEntriesAddr() {
+ ASSERT(length() > 0);
+ return data_start() + 2; // +2 for function name and calls eval.
+}
+
+
+Object** SerializedScopeInfo::ParameterEntriesAddr() {
+ ASSERT(length() > 0);
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ p = ReadInt(p, &number_of_context_slots);
+ return p + number_of_context_slots * 2; // *2 for pairs
+}
+
+
+Object** SerializedScopeInfo::StackSlotEntriesAddr() {
+ ASSERT(length() > 0);
+ Object** p = ParameterEntriesAddr();
+ int number_of_parameter_slots;
+ p = ReadInt(p, &number_of_parameter_slots);
+ return p + number_of_parameter_slots;
+}
+
+
+bool SerializedScopeInfo::CallsEval() {
+ if (length() > 0) {
+ Object** p = data_start() + 1; // +1 for function name.
+ bool calls_eval;
+ p = ReadBool(p, &calls_eval);
+ return calls_eval;
+ }
+ return true;
+}
+
+
+int SerializedScopeInfo::NumberOfStackSlots() {
+ if (length() > 0) {
+ Object** p = StackSlotEntriesAddr();
+ int number_of_stack_slots;
+ ReadInt(p, &number_of_stack_slots);
+ return number_of_stack_slots;
+ }
+ return 0;
+}
+
+
+int SerializedScopeInfo::NumberOfContextSlots() {
+ if (length() > 0) {
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
+ }
+ return 0;
+}
+
+
+bool SerializedScopeInfo::HasHeapAllocatedLocals() {
+ if (length() > 0) {
+ Object** p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ return number_of_context_slots > 0;
+ }
+ return false;
+}
+
+
+int SerializedScopeInfo::StackSlotIndex(String* name) {
+ ASSERT(name->IsSymbol());
+ if (length() > 0) {
+ // Slots start after length entry.
+ Object** p0 = StackSlotEntriesAddr();
+ int number_of_stack_slots;
+ p0 = ReadInt(p0, &number_of_stack_slots);
+ Object** p = p0;
+ Object** end = p0 + number_of_stack_slots;
+ while (p != end) {
+ if (*p == name) return static_cast<int>(p - p0);
+ p++;
+ }
+ }
+ return -1;
+}
+
+int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
+ ASSERT(name->IsSymbol());
+ int result = ContextSlotCache::Lookup(this, name, mode);
+ if (result != ContextSlotCache::kNotFound) return result;
+ if (length() > 0) {
+ // Slots start after length entry.
+ Object** p0 = ContextEntriesAddr();
+ int number_of_context_slots;
+ p0 = ReadInt(p0, &number_of_context_slots);
+ Object** p = p0;
+ Object** end = p0 + number_of_context_slots * 2;
+ while (p != end) {
+ if (*p == name) {
+ ASSERT(((p - p0) & 1) == 0);
+ int v;
+ ReadInt(p + 1, &v);
+ Variable::Mode mode_value = static_cast<Variable::Mode>(v);
+ if (mode != NULL) *mode = mode_value;
+ result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ ContextSlotCache::Update(this, name, mode_value, result);
+ return result;
+ }
+ p += 2;
+ }
+ }
+ ContextSlotCache::Update(this, name, Variable::INTERNAL, -1);
+ return -1;
+}
+
+
+int SerializedScopeInfo::ParameterIndex(String* name) {
+ ASSERT(name->IsSymbol());
+ if (length() > 0) {
+ // We must read parameters from the end since for
+ // multiply declared parameters the value of the
+ // last declaration of that parameter is used
+ // inside a function (and thus we need to look
+ // at the last index). Was bug# 1110337.
+ //
+ // Eventually, we should only register such parameters
+ // once, with corresponding index. This requires a new
+ // implementation of the ScopeInfo code. See also other
+ // comments in this file regarding this.
+ Object** p = ParameterEntriesAddr();
+ int number_of_parameter_slots;
+ Object** p0 = ReadInt(p, &number_of_parameter_slots);
+ p = p0 + number_of_parameter_slots;
+ while (p > p0) {
+ p--;
+ if (*p == name) return static_cast<int>(p - p0);
+ }
+ }
+ return -1;
+}
+
+
+int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
+ ASSERT(name->IsSymbol());
+ if (length() > 0) {
+ Object** p = data_start();
+ if (*p == name) {
+ p = ContextEntriesAddr();
+ int number_of_context_slots;
+ ReadInt(p, &number_of_context_slots);
+ ASSERT(number_of_context_slots != 0);
+ // The function context slot is the last entry.
+ return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
+ }
+ }
+ return -1;
+}
+
+
+int ContextSlotCache::Hash(Object* data, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
}
-int ContextSlotCache::Lookup(Code* code,
+int ContextSlotCache::Lookup(Object* data,
String* name,
Variable::Mode* mode) {
- int index = Hash(code, name);
+ int index = Hash(data, name);
Key& key = keys_[index];
- if ((key.code == code) && key.name->Equals(name)) {
+ if ((key.data == data) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
return result.index() + kNotFound;
@@ -566,28 +542,28 @@
}
-void ContextSlotCache::Update(Code* code,
+void ContextSlotCache::Update(Object* data,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
if (Heap::LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(code, symbol);
+ int index = Hash(data, symbol);
Key& key = keys_[index];
- key.code = code;
+ key.data = data;
key.name = symbol;
// Note that Value only takes a uint as index.
values_[index] = Value(mode, slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(code, name, mode, slot_index);
+ ValidateEntry(data, name, mode, slot_index);
#endif
}
}
void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
+ for (int index = 0; index < kLength; index++) keys_[index].data = NULL;
}
@@ -599,15 +575,15 @@
#ifdef DEBUG
-void ContextSlotCache::ValidateEntry(Code* code,
+void ContextSlotCache::ValidateEntry(Object* data,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
if (Heap::LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(code, name);
+ int index = Hash(data, name);
Key& key = keys_[index];
- ASSERT(key.code == code);
+ ASSERT(key.data == data);
ASSERT(key.name->Equals(name));
Value result(values_[index]);
ASSERT(result.mode() == mode);
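
All of the lookup helpers above walk the same FixedArray layout produced by Serialize: function name, calls-eval flag, then three length-prefixed tables (context slot name/mode pairs, parameter names, stack slot names). A schematic walk of that layout, with void* slots standing in for tagged Object* values and lengths stored as plain integers rather than Smis (illustrative only):

#include <stdint.h>

struct ScopeInfoLayout {
  void** slots;  // Corresponds to data_start() on the backing FixedArray.

  // Layout: [name][calls_eval]
  //         [n_context][name, mode]*n_context
  //         [n_params][name]*n_params
  //         [n_stack][name]*n_stack
  void** ContextEntries() { return slots + 2; }  // Skip name and flag.

  void** ParameterEntries() {
    void** p = ContextEntries();
    intptr_t n = reinterpret_cast<intptr_t>(*p++);  // Context slot count.
    return p + n * 2;                               // Two slots per pair.
  }

  void** StackSlotEntries() {
    void** p = ParameterEntries();
    intptr_t n = reinterpret_cast<intptr_t>(*p++);  // Parameter count.
    return p + n;
  }
};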
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 9fb26d0..0fdab56 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -37,7 +37,7 @@
// Scope information represents information about a function's
// scopes (currently only one, because we don't do any inlining)
// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form with Code objects and is used
+// is stored in a compressed form in FixedArray objects and is used
// at runtime (stack dumps, deoptimization, etc.).
//
// Historical note: In other VMs built by this team, ScopeInfo was
@@ -54,23 +54,11 @@
// Create a ScopeInfo instance from a scope.
explicit ScopeInfo(Scope* scope);
- // Create a ScopeInfo instance from a Code object.
- explicit ScopeInfo(Code* code);
+ // Create a ScopeInfo instance from SerializedScopeInfo.
+ explicit ScopeInfo(SerializedScopeInfo* data);
- // Write the ScopeInfo data into a Code object, and returns the
- // amount of space that was needed. If no Code object is provided
- // (NULL handle), Serialize() only returns the amount of space needed.
- //
- // This operations requires that the Code object has the correct amount
- // of space for the ScopeInfo data; otherwise the operation fails (fatal
- // error). Any existing scope info in the Code object is simply overwritten.
- int Serialize(Code* code);
-
- // Garbage collection support for scope info embedded in Code objects.
- // This code is in ScopeInfo because only here we should have to know
- // about the encoding.
- static void IterateScopeInfo(Code* code, ObjectVisitor* v);
-
+ // Creates a SerializedScopeInfo holding the serialized scope info.
+ Handle<SerializedScopeInfo> Serialize();
// --------------------------------------------------------------------------
// Lookup
@@ -95,51 +83,6 @@
int NumberOfLocals() const;
// --------------------------------------------------------------------------
- // The following functions provide quick access to scope info details
- // for runtime routines w/o the need to explicitly create a ScopeInfo
- // object.
- //
- // ScopeInfo is the only class which should have to know about the
- // encoding of it's information in a Code object, which is why these
- // functions are in this class.
-
- // Does this scope call eval.
- static bool CallsEval(Code* code);
-
- // Return the number of stack slots for code.
- static int NumberOfStackSlots(Code* code);
-
- // Return the number of context slots for code.
- static int NumberOfContextSlots(Code* code);
-
- // Return if this has context slots besides MIN_CONTEXT_SLOTS;
- static bool HasHeapAllocatedLocals(Code* code);
-
- // Lookup support for scope info embedded in Code objects. Returns
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
- static int StackSlotIndex(Code* code, String* name);
-
- // Lookup support for scope info embedded in Code objects. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- static int ContextSlotIndex(Code* code, String* name, Variable::Mode* mode);
-
- // Lookup support for scope info embedded in Code objects. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
- static int ParameterIndex(Code* code, String* name);
-
- // Lookup support for scope info embedded in Code objects. Returns the
- // function context slot index if the function name is present (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
- static int FunctionContextSlotIndex(Code* code, String* name);
-
- // --------------------------------------------------------------------------
// Debugging support
#ifdef DEBUG
@@ -155,32 +98,82 @@
List<Variable::Mode, Allocator > context_modes_;
};
-class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
- public:
- // Create a ZoneScopeInfo instance from a scope.
- explicit ZoneScopeInfo(Scope* scope)
- : ScopeInfo<ZoneListAllocationPolicy>(scope) {}
- // Create a ZoneScopeInfo instance from a Code object.
- explicit ZoneScopeInfo(Code* code)
- : ScopeInfo<ZoneListAllocationPolicy>(code) {}
+// This object provides quick access to scope info details for runtime
+// routines w/o the need to explicitly create a ScopeInfo object.
+class SerializedScopeInfo : public FixedArray {
+ public:
+
+ static SerializedScopeInfo* cast(Object* object) {
+ ASSERT(object->IsFixedArray());
+ return reinterpret_cast<SerializedScopeInfo*>(object);
+ }
+
+ // Does this scope call eval?
+ bool CallsEval();
+
+ // Return the number of stack slots for code.
+ int NumberOfStackSlots();
+
+ // Return the number of context slots for code.
+ int NumberOfContextSlots();
+
+ // Returns whether this has context slots besides MIN_CONTEXT_SLOTS.
+ bool HasHeapAllocatedLocals();
+
+ // Lookup support for serialized scope info. Returns the
+ // stack slot index for a given slot name if the slot is
+ // present; otherwise returns a value < 0. The name must be a symbol
+ // (canonicalized).
+ int StackSlotIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // context slot index for a given slot name if the slot is present; otherwise
+ // returns a value < 0. The name must be a symbol (canonicalized).
+ // If the slot is present and mode != NULL, sets *mode to the corresponding
+ // mode for that variable.
+ int ContextSlotIndex(String* name, Variable::Mode* mode);
+
+ // Lookup support for serialized scope info. Returns the
+ // parameter index for a given parameter name if the parameter is present;
+ // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+ int ParameterIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // function context slot index if the function name is present (named
+ // function expressions only), otherwise returns a value < 0. The name
+ // must be a symbol (canonicalized).
+ int FunctionContextSlotIndex(String* name);
+
+ static Handle<SerializedScopeInfo> Create(Scope* scope);
+
+ // Returns an empty serialized scope info.
+ static SerializedScopeInfo* Empty();
+
+ private:
+
+ inline Object** ContextEntriesAddr();
+
+ inline Object** ParameterEntriesAddr();
+
+ inline Object** StackSlotEntriesAddr();
};
-// Cache for mapping (code, property name) into context slot index.
+// Cache for mapping (data, property name) into context slot index.
// The cache contains both positive and negative results.
// A slot index of -1 means the property is absent.
// Cleared at startup and prior to mark sweep collection.
class ContextSlotCache {
public:
- // Lookup context slot index for (code, name).
+ // Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
- static int Lookup(Code* code,
+ static int Lookup(Object* data,
String* name,
Variable::Mode* mode);
// Update an element in the cache.
- static void Update(Code* code,
+ static void Update(Object* data,
String* name,
Variable::Mode mode,
int slot_index);
@@ -190,10 +183,10 @@
static const int kNotFound = -2;
private:
- inline static int Hash(Code* code, String* name);
+ inline static int Hash(Object* data, String* name);
#ifdef DEBUG
- static void ValidateEntry(Code* code,
+ static void ValidateEntry(Object* data,
String* name,
Variable::Mode mode,
int slot_index);
@@ -201,7 +194,7 @@
static const int kLength = 256;
struct Key {
- Code* code;
+ Object* data;
String* name;
};
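
ContextSlotCache is a fixed-size, direct-mapped cache: the (data, name) pair hashes to one of 256 slots, and a colliding Update simply evicts whatever was there. A self-contained model of that behavior (a hypothetical class, not the V8 one; std::string stands in for symbol pointers):

#include <stdint.h>
#include <functional>
#include <string>

class SlotCache {
 public:
  static const int kNotFound = -2;

  int Lookup(const void* data, const std::string& name) const {
    const Entry& e = entries_[Hash(data, name)];
    if (e.data == data && e.name == name) return e.index;
    return kNotFound;
  }
  void Update(const void* data, const std::string& name, int index) {
    Entry& e = entries_[Hash(data, name)];
    e.data = data;  // A hash collision simply evicts the old entry.
    e.name = name;
    e.index = index;
  }

 private:
  struct Entry { const void* data = nullptr; std::string name; int index = 0; };
  static const int kLength = 256;

  static int Hash(const void* data, const std::string& name) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(data) >> 2;  // Drop tag bits.
    size_t name_hash = std::hash<std::string>()(name);
    return static_cast<int>((addr ^ name_hash) % kLength);
  }

  Entry entries_[kLength];
};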
diff --git a/src/serialize.cc b/src/serialize.cc
index a6a516a..0e283f4 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -360,6 +360,7 @@
UNCLASSIFIED,
5,
"StackGuard::address_of_real_jslimit()");
+#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::address_of_regexp_stack_limit().address(),
UNCLASSIFIED,
6,
@@ -376,6 +377,7 @@
UNCLASSIFIED,
9,
"OffsetsVector::static_offsets_vector");
+#endif // V8_INTERPRETED_REGEXP
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
10,
@@ -673,6 +675,14 @@
LOG(SnapshotPositionEvent(address, source_->position()));
}
ReadChunk(current, limit, space_number, address);
+
+ if (space == Heap::map_space()) {
+ ASSERT(size == Map::kSize);
+ HeapObject* obj = HeapObject::FromAddress(address);
+ Map* map = reinterpret_cast<Map*>(obj);
+ map->set_scavenger(Heap::GetScavenger(map->instance_type(),
+ map->instance_size()));
+ }
}
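
The map-space special case above exists because Map objects cache a raw C++ function pointer to their scavenger routine, and raw pointers cannot survive serialization; the pointer is recomputed from the map's instance type and size as soon as the map body has been read back. The same pattern, reduced to its essentials with hypothetical types:

// Illustrative only: a deserializer must rebuild raw function pointers
// from serializable fields instead of reading them from the snapshot.
typedef void (*Scavenger)(void* object);

static void GenericScavenge(void*) {}  // Placeholder scavenge routine.

static Scavenger SelectScavenger(int /*instance_type*/, int /*instance_size*/) {
  return &GenericScavenge;  // The real lookup picks a specialized routine.
}

struct MapStub {
  int instance_type;
  int instance_size;
  Scavenger scavenger;  // Never serialized; recomputed on load.
};

static void FixupAfterRead(MapStub* map) {
  map->scavenger = SelectScavenger(map->instance_type, map->instance_size);
}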
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index a654a08..bc29d06 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1186,7 +1186,7 @@
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
+ Object* result = Heap::CreateCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs && !result->IsFailure()) {
Code::cast(result)->Disassemble(name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 856904a..8c00ee8 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -429,23 +429,23 @@
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
- Label* miss,
- Register extra = no_reg) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
- name, kInvalidProtoDepth, miss, extra);
+ Label* miss) {
+ return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
+ scratch2, name, kInvalidProtoDepth, miss);
}
Register CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra = no_reg);
+ Label* miss);
protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -459,6 +459,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss);
@@ -469,6 +470,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -479,6 +481,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss);
@@ -490,6 +493,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss);
diff --git a/src/top.cc b/src/top.cc
index 516ec67..2887b76 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -44,6 +44,11 @@
NoAllocationStringAllocator* preallocated_message_space = NULL;
+bool capture_stack_trace_for_uncaught_exceptions = false;
+int stack_trace_for_uncaught_exceptions_frame_limit = 0;
+StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options =
+ StackTrace::kOverview;
+
Address top_addresses[] = {
#define C(name) reinterpret_cast<Address>(Top::name()),
TOP_ADDRESS_LIST(C)
@@ -365,9 +370,8 @@
}
-Local<StackTrace> Top::CaptureCurrentStackTrace(
+Handle<JSArray> Top::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
- v8::HandleScope scope;
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
@@ -443,7 +447,7 @@
}
stack_trace->set_length(Smi::FromInt(frames_seen));
- return scope.Close(Utils::StackTraceToLocal(stack_trace));
+ return stack_trace;
}
@@ -681,10 +685,7 @@
// TODO(1240995): To avoid having to call JavaScript code to compute
// the message for stack overflow exceptions which is very likely to
// double fault with another stack overflow exception, we use a
- // precomputed message. This is somewhat problematic in that it
- // doesn't use ReportUncaughtException to determine the location
- // from where the exception occurred. It should probably be
- // reworked.
+ // precomputed message.
DoThrow(*exception, NULL, kStackOverflowMessage);
return Failure::Exception();
}
@@ -778,25 +779,6 @@
}
-void Top::ReportUncaughtException(Handle<Object> exception,
- MessageLocation* location,
- Handle<String> stack_trace) {
- Handle<Object> message;
- if (!Bootstrapper::IsActive()) {
- // It's not safe to try to make message objects while the bootstrapper
- // is active since the infrastructure may not have been properly
- // initialized.
- message =
- MessageHandler::MakeMessageObject("uncaught_exception",
- location,
- HandleVector<Object>(&exception, 1),
- stack_trace);
- }
- // Report the uncaught exception.
- MessageHandler::ReportMessage(location, message);
-}
-
-
bool Top::ShouldReturnException(bool* is_caught_externally,
bool catchable_by_javascript) {
// Find the top-most try-catch handler.
@@ -869,8 +851,15 @@
// may not have been properly initialized.
Handle<String> stack_trace;
if (FLAG_trace_exception) stack_trace = StackTraceString();
+ Handle<JSArray> stack_trace_object;
+ if (report_exception && capture_stack_trace_for_uncaught_exceptions) {
+ stack_trace_object = Top::CaptureCurrentStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit,
+ stack_trace_for_uncaught_exceptions_options);
+ }
message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+ location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+ stack_trace_object);
}
}
@@ -997,6 +986,16 @@
}
+void Top::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ capture_stack_trace_for_uncaught_exceptions = capture;
+ stack_trace_for_uncaught_exceptions_frame_limit = frame_limit;
+ stack_trace_for_uncaught_exceptions_options = options;
+}
+
+
bool Top::is_out_of_memory() {
if (has_pending_exception()) {
Object* e = pending_exception();
diff --git a/src/top.h b/src/top.h
index 4a76a7f..8733393 100644
--- a/src/top.h
+++ b/src/top.h
@@ -227,6 +227,11 @@
(try_catch_handler() == thread_local_.catcher_);
}
+ static void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options);
+
// Tells whether the current context has experienced an out of memory
// exception.
static bool is_out_of_memory();
@@ -266,7 +271,7 @@
static void PrintStack(StringStream* accumulator);
static void PrintStack();
static Handle<String> StackTraceString();
- static Local<StackTrace> CaptureCurrentStackTrace(
+ static Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
@@ -302,9 +307,6 @@
const char* message);
static bool ShouldReturnException(bool* is_caught_externally,
bool catchable_by_javascript);
- static void ReportUncaughtException(Handle<Object> exception,
- MessageLocation* location,
- Handle<String> stack_trace);
// Attempts to compute the current source location, storing the
// result in the target out parameter.
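
These Top hooks back the public API from the 2.2.24 ChangeLog entry for capturing stack traces of uncaught exceptions. A minimal embedder sketch, assuming the v8.h entry points V8::SetCaptureStackTraceForUncaughtExceptions and Message::GetStackTrace introduced with that feature (the listener body is illustrative):

#include <v8.h>

// Illustrative listener: uncaught-exception messages carry a stack trace
// once capturing has been enabled.
static void OnUncaught(v8::Handle<v8::Message> message,
                       v8::Handle<v8::Value> /*data*/) {
  v8::Handle<v8::StackTrace> trace = message->GetStackTrace();
  if (!trace.IsEmpty()) {
    // Inspect trace->GetFrameCount() frames here.
  }
}

int main() {
  v8::V8::SetCaptureStackTraceForUncaughtExceptions(
      true, 16, v8::StackTrace::kOverview);
  v8::V8::AddMessageListener(OnUncaught);
  // ... create a context, compile and run scripts as usual ...
  return 0;
}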
diff --git a/src/v8natives.js b/src/v8natives.js
index 487faab..198cecc 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -225,16 +225,14 @@
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
- if (!IS_SPEC_OBJECT_OR_NULL(V) && !IS_UNDETECTABLE(V)) return false;
+ if (!IS_SPEC_OBJECT(V)) return false;
return %IsInPrototypeChain(this, V);
}
// ECMA-262 - 15.2.4.7
function ObjectPropertyIsEnumerable(V) {
- if (this == null) return false;
- if (!IS_SPEC_OBJECT_OR_NULL(this)) return false;
- return %IsPropertyEnumerable(this, ToString(V));
+ return %IsPropertyEnumerable(ToObject(this), ToString(V));
}
@@ -279,8 +277,7 @@
function ObjectKeys(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
}
@@ -329,7 +326,7 @@
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT_OR_NULL(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("property_desc_object", [obj]);
}
var desc = new PropertyDescriptor();
@@ -626,8 +623,7 @@
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
return obj.__proto__;
}
@@ -635,8 +631,7 @@
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
@@ -645,8 +640,7 @@
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
// Find all the indexed properties.
@@ -698,7 +692,7 @@
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT_OR_NULL(proto)) {
+ if (!IS_SPEC_OBJECT(proto) && proto !== null) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();
@@ -710,8 +704,7 @@
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
}
var name = ToString(p);
@@ -723,8 +716,7 @@
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj))
+ if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
var props = ToObject(properties);
var key_values = [];
@@ -745,10 +737,42 @@
}
+// ES5 section 15.2.3.8.
+function ObjectSeal(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isConfigurable()) desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
+ ObjectPreventExtension(obj);
+}
+
+
+// ES5 section 15.2.3.9.
+function ObjectFreeze(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ if (desc.isConfigurable()) desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
+ ObjectPreventExtension(obj);
+}
+
+
// ES5 section 15.2.3.10
function ObjectPreventExtension(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
%PreventExtensions(obj);
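
ObjectSeal makes every own property non-configurable and then revokes extensibility; ObjectFreeze additionally makes data properties non-writable. A minimal sketch of the resulting semantics (illustrative, runnable in any ES5 shell):

    var sealed = Object.seal({x: 1});
    sealed.x = 2;      // allowed: sealed properties remain writable
    delete sealed.x;   // fails: properties are non-configurable
    sealed.y = 3;      // fails: the object is no longer extensible

    var frozen = Object.freeze({x: 1});
    frozen.x = 2;      // fails: frozen data properties are read-only
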
@@ -756,10 +780,46 @@
}
+// ES5 section 15.2.3.11
+function ObjectIsSealed(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (desc.isConfigurable()) return false;
+ }
+ if (!ObjectIsExtensible(obj)) {
+ return true;
+ }
+ return false;
+}
+
+
+// ES5 section 15.2.3.12
+function ObjectIsFrozen(obj) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
+ }
+ var names = ObjectGetOwnPropertyNames(obj);
+ for (var key in names) {
+ var name = names[key];
+ var desc = GetOwnProperty(obj, name);
+ if (IsDataDescriptor(desc) && desc.isWritable()) return false;
+ if (desc.isConfigurable()) return false;
+ }
+ if (!ObjectIsExtensible(obj)) {
+ return true;
+ }
+ return false;
+}
+
+
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
- if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
- !IS_UNDETECTABLE(obj)) {
+ if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
return %IsExtensible(obj);
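
The predicates walk all own properties: an object is sealed when it is non-extensible and every property is non-configurable, and frozen when, in addition, every data property is non-writable. For example (illustrative only):

    var o = {x: 1};
    Object.isSealed(o);   // false: o is still extensible
    Object.seal(o);
    Object.isSealed(o);   // true
    Object.isFrozen(o);   // false: x is still writable
    Object.freeze(o);
    Object.isFrozen(o);   // true
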
@@ -799,11 +859,15 @@
"create", ObjectCreate,
"defineProperty", ObjectDefineProperty,
"defineProperties", ObjectDefineProperties,
+ "freeze", ObjectFreeze,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
"isExtensible", ObjectIsExtensible,
- "preventExtensions", ObjectPreventExtension
+ "isFrozen", ObjectIsFrozen,
+ "isSealed", ObjectIsSealed,
+ "preventExtensions", ObjectPreventExtension,
+ "seal", ObjectSeal
));
}
diff --git a/src/version.cc b/src/version.cc
index d930c8d..bf5feb1 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 2
-#define BUILD_NUMBER 23
+#define MINOR_VERSION 3
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 4df2cfd..aa4cedb 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -74,8 +74,10 @@
if (state == EXTERNAL) state = OTHER;
#endif
state_ = state;
- previous_ = current_state_; // Save the previous state.
- current_state_ = this; // Install the new state.
+ // Save the previous state.
+ previous_ = reinterpret_cast<VMState*>(current_state_);
+ // Install the new state.
+ OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(this));
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
@@ -103,7 +105,8 @@
VMState::~VMState() {
if (disabled_) return;
- current_state_ = previous_; // Return to the previous state.
+ // Return to the previous state.
+ OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(previous_));
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
diff --git a/src/vm-state.cc b/src/vm-state.cc
index 3859efb..6bd737d 100644
--- a/src/vm-state.cc
+++ b/src/vm-state.cc
@@ -33,7 +33,7 @@
namespace internal {
#ifdef ENABLE_VMSTATE_TRACKING
-VMState* VMState::current_state_ = NULL;
+AtomicWord VMState::current_state_ = 0;
#endif
} } // namespace v8::internal
diff --git a/src/vm-state.h b/src/vm-state.h
index 241df4c..080eb8d 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -44,15 +44,17 @@
// Used for debug asserts.
static bool is_outermost_external() {
- return current_state_ == NULL;
+ return current_state_ == 0;
}
static StateTag current_state() {
- return current_state_ ? current_state_->state() : EXTERNAL;
+ VMState* state = reinterpret_cast<VMState*>(current_state_);
+ return state ? state->state() : EXTERNAL;
}
static Address external_callback() {
- return current_state_ ? current_state_->external_callback_ : NULL;
+ VMState* state = reinterpret_cast<VMState*>(current_state_);
+ return state ? state->external_callback_ : NULL;
}
private:
@@ -62,7 +64,7 @@
Address external_callback_;
// A stack of VM states.
- static VMState* current_state_;
+ static AtomicWord current_state_;
#else
public:
explicit VMState(StateTag state) {}
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index c19e2ba..c66666a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -119,7 +119,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>());
if (!code->IsCode()) return;
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 7e04c20..b41fb74 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -140,149 +140,6 @@
// -------------------------------------------------------------------------
-// Deferred code objects
-//
-// These subclasses of DeferredCode add pieces of code to the end of generated
-// code. They are branched to from the generated code, and
-// keep some slower code out of the main body of the generated code.
-// Many of them call a code stub or a runtime function.
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
-// -----------------------------------------------------------------------------
// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
@@ -298,20 +155,11 @@
}
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
+// Calling conventions:
+// rbp: caller's frame pointer
+// rsp: stack pointer
+// rdi: called JS function
+// rsi: callee's context
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
@@ -329,7 +177,7 @@
// Adjust for function-level loop nesting.
ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ += info->loop_nesting();
+ loop_nesting_ = info->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
@@ -543,209 +391,2105 @@
allocator_ = NULL;
}
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(rax);
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(rsi)); // do not overwrite context register
+ Register context = rsi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(rsp, 0);
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- DeleteFrame();
}
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+ Result tmp,
+ JumpTarget* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ ASSERT(tmp.is_register());
+ Register context = rsi;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp.reg(), slot->index());
}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* expr,
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, dest);
+ Visit(expr);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
+ }
+ }
+
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ ToBoolean(dest);
+ }
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* expr) {
+#ifdef DEBUG
+ int original_height = frame_->height();
#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(expr, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded;
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
+ if (true_target.is_linked()) {
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
+ }
+
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded;
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
+
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+ // subexpressions (eg, the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded;
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
+ }
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ }
+ loaded.Bind();
+ }
+ }
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
+}
-class DeferredReferenceGetKeyedValue: public DeferredCode {
+void CodeGenerator::LoadGlobal() {
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
+ } else {
+ Result temp = allocator_->Allocate();
+ __ movq(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
+ }
+}
+
+
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ movq(reg, GlobalObject());
+ __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
+}
+
+
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+ } else {
+ // Anything else can be handled normally.
+ Load(expr);
+ }
+}
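
The special case exists so that typeof on an undeclared global performs an ordinary property load instead of a contextual lookup, which would raise a reference error. As a sketch:

    typeof not_declared_anywhere;   // "undefined", no ReferenceError
    not_declared_anywhere;          // ReferenceError: plain loads still throw
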
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope()->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+ // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope()->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope()->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ Variable* arguments = scope()->arguments()->var();
+ Variable* shadow = scope()->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ return frame_->Pop();
+}
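
The hole sentinel lets the lazily allocated arguments object coexist with user code that assigns to a local named 'arguments': once the slot holds a real value, the lazy store is skipped. For instance (illustrative only):

    function f(a) {
      var arguments = 42;   // the user's value wins over the lazy arguments object
      return arguments;
    }
    f(1);                   // 42
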
+
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
+
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ ASSERT(is_unloaded() || is_illegal());
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
+
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ Load(property->obj());
+ if (property->key()->IsPropertyName()) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ // If rax is free, the register allocator prefers it. Thus the code
+ // generator will load the global object into rax, which is where
+ // LoadIC wants it. Most uses of Reference call LoadIC directly
+ // after the reference is created.
+ frame_->Spill(rax);
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ Load(e);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+
+ in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ frame_->Nip(ref->size());
+ ref->set_unloaded();
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+ Comment cmnt(masm_, "[ ToBoolean");
+
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ if (value.is_number()) {
+ // Fast case if TypeInfo indicates only numbers.
+ if (FLAG_debug_code) {
+ __ AbortIfNotNumber(value.reg());
+ }
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ if (value.is_smi()) {
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ value.Unuse();
+ dest->Split(not_zero);
+ }
+ } else {
+ // Fast case checks.
+ // 'false' => false.
+ __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+ dest->false_target()->Branch(equal);
+
+ // 'true' => true.
+ __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+ dest->true_target()->Branch(equal);
+
+ // 'undefined' => false.
+ __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+ dest->false_target()->Branch(equal);
+
+ // Smi => false iff zero.
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
+ }
+}
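
The fast paths mirror ES ToBoolean: false, undefined, +0/-0, and NaN are falsy, while everything else, including empty objects, is truthy. A compact sketch:

    !!0;     // false: the smi-zero check
    !!NaN;   // false: ucomisd compares unordered, so the not_zero branch is not taken
    !!"";    // false: strings fall through to the generic ToBooleanStub
    !!{};    // true
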
+
+
+class FloatingPointHelper : public AllStatic {
public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+// Call the specialized stub for a binary operation.
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ set_comment("[ DeferredInlineBinaryOperation");
}
virtual void Generate();
- Label* patch_site() { return &patch_site_; }
-
private:
- Label patch_site_;
+ Token::Value op_;
Register dst_;
- Register receiver_;
- Register key_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
};
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
+void DeferredInlineBinaryOperation::Generate() {
+ Label done;
+ if ((op_ == Token::ADD)
+ || (op_ == Token::SUB)
+ || (op_ == Token::MUL)
+ || (op_ == Token::DIV)) {
+ Label call_runtime;
+ Label left_smi, right_smi, load_right, do_op;
+ __ JumpIfSmi(left_, &left_smi);
+ __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ movq(dst_, left_);
}
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
- }
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+ __ jmp(&load_right);
+ __ bind(&left_smi);
+ __ SmiToInteger32(left_, left_);
+ __ cvtlsi2sd(xmm0, left_);
+ __ Integer32ToSmi(left_, left_);
+ if (mode_ == OVERWRITE_LEFT) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&load_right);
+ __ JumpIfSmi(right_, &right_smi);
+ __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ movq(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+ __ jmp(&do_op);
+
+ __ bind(&right_smi);
+ __ SmiToInteger32(right_, right_);
+ __ cvtlsi2sd(xmm1, right_);
+ __ Integer32ToSmi(right_, right_);
+ if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+ }
+
+ __ bind(&do_op);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+ __ jmp(&done);
+
+ __ bind(&call_runtime);
+ }
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ bind(&done);
+}
+
+
+static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
+ Token::Value op,
+ const Result& right,
+ const Result& left) {
+ // Set TypeInfo of result according to the operation performed.
+ // We rely on the fact that smis have a 32 bit payload on x64.
+ STATIC_ASSERT(kSmiValueSize == 32);
+ switch (op) {
+ case Token::COMMA:
+ return right.type_info();
+ case Token::OR:
+ case Token::AND:
+ // Result type can be either of the two input types.
+ return operands_type;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SAR:
+ case Token::SHL:
+ // Result is always a smi.
+ return TypeInfo::Smi();
+ case Token::SHR:
+ // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
+ return (right.is_constant() && right.handle()->IsSmi()
+ && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
+ ? TypeInfo::Smi()
+ : TypeInfo::Number();
+ case Token::ADD:
+ if (operands_type.IsNumber()) {
+ return TypeInfo::Number();
+ } else if (left.type_info().IsString() || right.type_info().IsString()) {
+ return TypeInfo::String();
+ } else {
+ return TypeInfo::Unknown();
+ }
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Result is always a number.
+ return TypeInfo::Number();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
+}
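
The SHR rule is the subtle one: x >>> y fits in a smi only when the masked shift count is at least 1, since shifting by one or more bits bounds the result by 2^31 - 1, which the 32-bit smi payload on x64 always represents. Illustrative cases:

    -1 >>> 1;   // 2147483647: fits the smi range
    -1 >>> 0;   // 4294967295: may exceed smi range, so typed as Number
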
+
+
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
+ OverwriteMode overwrite_mode) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
+ Comment cmnt_token(masm_, Token::String(op));
+
+ if (op == Token::COMMA) {
+ // Simply discard left value.
+ frame_->Nip(1);
+ return;
+ }
+
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+
+ if (op == Token::ADD) {
+ const bool left_is_string = left.type_info().IsString();
+ const bool right_is_string = right.type_info().IsString();
+ // Make sure constant strings have string type info.
+ ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+ left_is_string);
+ ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+ right_is_string);
+ if (left_is_string || right_is_string) {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ Result answer;
+ if (left_is_string) {
+ if (right_is_string) {
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
+ } else {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ }
+ } else if (right_is_string) {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ }
+ answer.set_type_info(TypeInfo::String());
+ frame_->Push(&answer);
+ return;
+ }
+ // Neither operand is known to be a string.
+ }
+
+ bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi_constant =
+ right.is_constant() && !right.handle()->IsSmi();
+
+ if (left_is_smi_constant && right_is_smi_constant) {
+ // Compute the constant result at compile time, and leave it on the frame.
+ int left_int = Smi::cast(*left.handle())->value();
+ int right_int = Smi::cast(*right.handle())->value();
+ if (FoldConstantSmis(op, left_int, right_int)) return;
+ }
+
+ // Get number type of left and right sub-expressions.
+ TypeInfo operands_type =
+ TypeInfo::Combine(left.type_info(), right.type_info());
+
+ TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
+
+ Result answer;
+ if (left_is_non_smi_constant || right_is_non_smi_constant) {
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_SMI_CODE_IN_STUB,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ } else if (right_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+ false, overwrite_mode);
+ } else if (left_is_smi_constant) {
+ answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+ true, overwrite_mode);
+ } else {
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 &&
+ (Token::IsBitOp(op) ||
+ operands_type.IsInteger32() ||
+ expr->type()->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op,
+ overwrite_mode,
+ NO_GENERIC_BINARY_FLAGS,
+ operands_type);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ }
+ }
+
+ answer.set_type_info(result_type);
+ frame_->Push(&answer);
+}
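
Putting the dispatch together: string operands route + to the string-add stub or builtins, two constant smis fold at compile time, and other shapes choose between inline smi code and the generic stub. Inputs that land on each path (illustrative only):

    "a" + 1;   // STRING_ADD_LEFT builtin: the left operand is a string
    1 + "a";   // STRING_ADD_RIGHT builtin
    2 + 3;     // folded to 5 at compile time by FoldConstantSmis
    var a = 7;
    a + 1;     // inline smi fast path with a deferred stub fallback
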
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ Object* answer_object = Heap::undefined_value();
+ switch (op) {
+ case Token::ADD:
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
+ answer_object = Smi::FromInt(left + right);
+ }
+ break;
+ case Token::SUB:
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
+ answer_object = Smi::FromInt(left - right);
+ }
+ break;
+ case Token::MUL: {
+ double answer = static_cast<double>(left) * right;
+ if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+ // If the product is zero and the non-zero factor is negative,
+ // the spec requires us to return floating point negative zero.
+ if (answer != 0 || (left >= 0 && right >= 0)) {
+ answer_object = Smi::FromInt(static_cast<int>(answer));
+ }
+ }
+ }
+ break;
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ answer_object = Smi::FromInt(left | right);
+ break;
+ case Token::BIT_AND:
+ answer_object = Smi::FromInt(left & right);
+ break;
+ case Token::BIT_XOR:
+ answer_object = Smi::FromInt(left ^ right);
+ break;
+
+ case Token::SHL: {
+ int shift_amount = right & 0x1F;
+ if (Smi::IsValid(left << shift_amount)) {
+ answer_object = Smi::FromInt(left << shift_amount);
+ }
+ break;
+ }
+ case Token::SHR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ unsigned_left >>= shift_amount;
+ if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+ answer_object = Smi::FromInt(unsigned_left);
+ }
+ break;
+ }
+ case Token::SAR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ if (left < 0) {
+ // Perform arithmetic shift of a negative number by
+ // complementing number, logical shifting, complementing again.
+ unsigned_left = ~unsigned_left;
+ unsigned_left >>= shift_amount;
+ unsigned_left = ~unsigned_left;
+ } else {
+ unsigned_left >>= shift_amount;
+ }
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (answer_object == Heap::undefined_value()) {
+ return false;
+ }
+ frame_->Push(Handle<Object>(answer_object));
+ return true;
+}
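
Two corner cases the folder respects: a zero product with a negative factor must be -0, which a smi cannot represent, so it is left to the stub; and arithmetic right shift of a negative value is computed by complementing, logical-shifting, and complementing again, which matches a signed shift. For example:

    -1 * 0;         // -0: not representable as a smi, not folded
    ~(~-5 >>> 1);   // -3, the same as -5 >> 1
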
+
+
+void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
+ TypeInfo type,
+ DeferredCode* deferred) {
+ if (!type.IsSmi()) {
+ __ JumpIfNotSmi(reg, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(reg);
+ }
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+ Register right,
+ TypeInfo left_info,
+ TypeInfo right_info,
+ DeferredCode* deferred) {
+ if (!left_info.IsSmi() && !right_info.IsSmi()) {
+ __ JumpIfNotBothSmi(left, right, deferred->entry_label());
+ } else if (!left_info.IsSmi()) {
+ __ JumpIfNotSmi(left, deferred->entry_label());
+ } else if (!right_info.IsSmi()) {
+ __ JumpIfNotSmi(right, deferred->entry_label());
+ }
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+}
+
+
+// Implements a binary operation using a deferred code object and some
+// inline code to operate on smis quickly.
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ // Copy the type info because left and right may be overwritten.
+ TypeInfo left_type_info = left->type_info();
+ TypeInfo right_type_info = right->type_info();
+ Token::Value op = expr->op();
+ Result answer;
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need rax as the quotient register, rdx as the remainder
+ // register, neither left nor right in rax or rdx, and left copied
+ // to rax.
+ Result quotient;
+ Result remainder;
+ bool left_is_in_rax = false;
+ // Step 1: get rax for quotient.
+ if ((left->is_register() && left->reg().is(rax)) ||
+ (right->is_register() && right->reg().is(rax))) {
+ // One or both is in rax. Use a fresh non-rdx register for
+ // them.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(rdx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(rax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_rax = true;
+ }
+ if (right->is_register() && right->reg().is(rax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rax);
+ } else {
+ // Neither left nor right is in rax.
+ quotient = allocator_->Allocate(rax);
+ }
+ ASSERT(quotient.is_register() && quotient.reg().is(rax));
+ ASSERT(!(left->is_register() && left->reg().is(rax)));
+ ASSERT(!(right->is_register() && right->reg().is(rax)));
+
+ // Step 2: get rdx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(rdx)) ||
+ (right->is_register() && right->reg().is(rdx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(rdx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(rdx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rdx);
+ } else {
+ // Neither left nor right is in rdx.
+ remainder = allocator_->Allocate(rdx);
+ }
+ }
+ ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+ ASSERT(!(left->is_register() && left->reg().is(rdx)));
+ ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(rax);
+ frame_->Spill(rdx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? rax : rdx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
+
+ if (op == Token::DIV) {
+ __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = quotient;
+ } else {
+ ASSERT(op == Token::MOD);
+ __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ answer = remainder;
+ }
+ ASSERT(answer.is_valid());
+ return answer;
+ }
+
+ // Special handling of shift operations because they use fixed
+ // registers.
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of rcx if necessary.
+ if (left->is_register() && left->reg().is(rcx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ movq(left->reg(), rcx);
+ }
+ right->ToRegister(rcx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(rcx));
+ ASSERT(right->is_register() && right->reg().is(rcx));
+
+ // We will modify right, it must be spilled.
+ frame_->Spill(rcx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ rcx,
+ overwrite_mode);
+
+ Label do_op;
+ if (right_type_info.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(right->reg());
+ }
+ __ movq(answer.reg(), left->reg());
+ // If left is not known to be a smi, check if it is.
+ // If left is not known to be a number, and it isn't a smi, check if
+ // it is a HeapNumber.
+ if (!left_type_info.IsSmi()) {
+ __ JumpIfSmi(answer.reg(), &do_op);
+ if (!left_type_info.IsNumber()) {
+ // Branch if not a heapnumber.
+ __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ deferred->Branch(not_equal);
+ }
+ // Load integer value into answer register using truncation.
+ __ cvttsd2si(answer.reg(),
+ FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+ // Branch if we might have overflowed.
+ // (False negative for Smi::kMinValue)
+ __ cmpq(answer.reg(), Immediate(0x80000000));
+ deferred->Branch(equal);
+ // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
+ __ Integer32ToSmi(answer.reg(), answer.reg());
+ } else {
+ // Fast case - both are actually smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left->reg());
+ }
+ }
+ } else {
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
+ left_type_info, right_type_info, deferred);
+ }
+ __ bind(&do_op);
+
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+ break;
+ case Token::SHR: {
+ __ SmiShiftLogicalRight(answer.reg(),
+ left->reg(),
+ rcx,
+ deferred->entry_label());
+ break;
+ }
+ case Token::SHL: {
+ __ SmiShiftLeft(answer.reg(),
+ left->reg(),
+ rcx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
+ }
+
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register answer is used to hold the answer. The
+ // registers containing left and right are not modified so they don't
+ // need to be spilled in the fast case.
+ answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+
+ // Perform the smi tag check.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
+ left_type_info, right_type_info, deferred);
+
+ switch (op) {
+ case Token::ADD:
+ __ SmiAdd(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+
+ case Token::SUB:
+ __ SmiSub(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+
+ case Token::MUL: {
+ __ SmiMul(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ SmiOr(answer.reg(), left->reg(), right->reg());
+ break;
+
+ case Token::BIT_AND:
+ __ SmiAnd(answer.reg(), left->reg(), right->reg());
+ break;
+
+ case Token::BIT_XOR:
+ __ SmiXor(answer.reg(), left->reg(), right->reg());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ src_(src),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register src_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
-class DeferredReferenceSetKeyedValue: public DeferredCode {
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceSetKeyedValue");
+ DeferredInlineSmiOperationReversed(Token::Value op,
+ Register dst,
+ Smi* value,
+ Register src,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ value_(value),
+ src_(src),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperationReversed");
}
virtual void Generate();
- Label* patch_site() { return &patch_site_; }
-
private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
+ Token::Value op_;
+ Register dst_;
+ Smi* value_;
+ Register src_;
+ OverwriteMode overwrite_mode_;
};
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
- } else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
- }
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
- }
- }
- }
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
- }
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
+void DeferredInlineSmiOperationReversed::Generate() {
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, value_, src_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
}
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAdd::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
+ Result* operand,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
+ // Generate inline code for a binary operation when one of the
+ // operands is a constant smi. Consumes the argument "operand".
+ if (IsUnsafeSmi(value)) {
+ Result unsafe_operand(value);
+ if (reversed) {
+ return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
+ overwrite_mode);
+ } else {
+ return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
+ overwrite_mode);
+ }
+ }
+
+ // Get the literal value.
+ Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
+
+ Token::Value op = expr->op();
+ Result answer;
+ switch (op) {
+ case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = NULL;
+ if (reversed) {
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ } else {
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ }
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiAddConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ answer = *operand;
+ break;
+ }
+
+ case Token::SUB: {
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ answer = *operand;
+ DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ // A smi currently fits in a 32-bit Immediate.
+ __ SmiSubConstant(operand->reg(),
+ operand->reg(),
+ smi_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ break;
+ }
+
+ case Token::SAR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftArithmeticRightConstant(operand->reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ answer = *operand;
+ }
+ break;
+
+ case Token::SHR:
+ if (reversed) {
+ Result constant_operand(value);
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLogicalRightConstant(answer.reg(),
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ break;
+
+ case Token::SHL:
+ if (reversed) {
+ operand->ToRegister();
+
+ // We need rcx to be available to hold operand, and to be spilled.
+ // SmiShiftLeft implicitly modifies rcx.
+ if (operand->reg().is(rcx)) {
+ frame_->Spill(operand->reg());
+ answer = allocator()->Allocate();
+ } else {
+ Result rcx_reg = allocator()->Allocate(rcx);
+ // answer must not be rcx.
+ answer = allocator()->Allocate();
+ // rcx_reg goes out of scope.
+ }
+
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ operand->reg(),
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+
+ __ Move(answer.reg(), smi_value);
+ __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
+ operand->Unuse();
+
+ deferred->BindExit();
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ if (shift_value == 0) {
+ // Spill operand so it can be overwritten in the slow case.
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ deferred->BindExit();
+ answer = *operand;
+ } else {
+ // Use a fresh temporary for nonzero shift values.
+ answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ __ SmiShiftLeftConstant(answer.reg(),
+ operand->reg(),
+ shift_value);
+ deferred->BindExit();
+ operand->Unuse();
+ }
+ }
+ break;
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ if (reversed) {
+ // Bit operations with a constant smi are commutative.
+ // We can swap left and right operands with no problem.
+ // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
+ overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
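+        // That is, NO_OVERWRITE (0) stays put while OVERWRITE_LEFT (1)
+        // and OVERWRITE_RIGHT (2) trade places.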
+ }
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
+ deferred);
+ if (op == Token::BIT_AND) {
+ __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
+ } else if (op == Token::BIT_XOR) {
+ if (int_value != 0) {
+ __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ } else {
+ ASSERT(op == Token::BIT_OR);
+ if (int_value != 0) {
+ __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
+ }
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break;
+ }
+
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
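+    // For a non-negative smi x, both x % 8 and x % -8 reduce to x & 7,
+    // since the sign of a JavaScript '%' result follows the dividend.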
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ Move(operand->reg(), Smi::FromInt(0));
+ } else {
+ __ SmiAndConstant(operand->reg(),
+ operand->reg(),
+ Smi::FromInt(int_value - 1));
+ }
+ deferred->BindExit();
+ answer = *operand;
+ break; // This break only applies if we generated code for MOD.
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
+
+ default: {
+ Result constant_operand(value);
+ if (reversed) {
+ answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
+ overwrite_mode);
+ }
+ break;
+ }
+ }
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
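+// Returns whether the result could be NaN: smi and int32 values cannot,
+// an unknown non-constant might, and a constant is NaN only if it is a
+// heap number holding NaN.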
+static bool CouldBeNaN(const Result& result) {
+ if (result.type_info().IsSmi()) return false;
+ if (result.type_info().IsInteger32()) return false;
+ if (!result.is_constant()) return true;
+ if (!result.handle()->IsHeapNumber()) return false;
+ return isnan(HeapNumber::cast(*result.handle())->value());
+}
+
+
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
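+// For example, ucomisd signals 'first operand smaller' through CF, so the
+// signed condition 'less' must be tested as the unsigned 'below'.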
+static Condition DoubleCondition(Condition cc) {
+ switch (cc) {
+ case less: return below;
+ case equal: return equal;
+ case less_equal: return below_equal;
+ case greater: return above;
+ case greater_equal: return above_equal;
+ default: UNREACHABLE();
+ }
+ UNREACHABLE();
+ return equal;
+}
+
+
+void CodeGenerator::Comparison(AstNode* node,
+ Condition cc,
+ bool strict,
+ ControlDestination* dest) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == equal);
+
+ Result left_side;
+ Result right_side;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
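+  // For example, 'a > b' is evaluated as 'b < a': the pop order below
+  // swaps the operands and the condition code is reversed.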
+ if (cc == greater || cc == less_equal) {
+ cc = ReverseCondition(cc);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
+ } else {
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
+ }
+ ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi = false;
+ bool left_side_constant_null = false;
+ bool left_side_constant_1_char_string = false;
+ if (left_side.is_constant()) {
+ left_side_constant_smi = left_side.handle()->IsSmi();
+ left_side_constant_null = left_side.handle()->IsNull();
+ left_side_constant_1_char_string =
+ (left_side.handle()->IsString() &&
+ String::cast(*left_side.handle())->length() == 1 &&
+ String::cast(*left_side.handle())->IsAsciiRepresentation());
+ }
+ bool right_side_constant_smi = false;
+ bool right_side_constant_null = false;
+ bool right_side_constant_1_char_string = false;
+ if (right_side.is_constant()) {
+ right_side_constant_smi = right_side.handle()->IsSmi();
+ right_side_constant_null = right_side.handle()->IsNull();
+ right_side_constant_1_char_string =
+ (right_side.handle()->IsString() &&
+ String::cast(*right_side.handle())->length() == 1 &&
+ String::cast(*right_side.handle())->IsAsciiRepresentation());
+ }
+
+ if (left_side_constant_smi || right_side_constant_smi) {
+ bool is_loop_condition = (node->AsExpression() != NULL) &&
+ node->AsExpression()->is_loop_condition();
+ ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
+ left_side_constant_smi, right_side_constant_smi,
+ is_loop_condition);
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ dest->true_target()->Branch(equal);
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+ dest->true_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ dest->false_target()->Branch(is_smi);
+
+ // It can be an undetectable object.
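+      // (For example, document.all in browsers is undetectable and
+      // compares equal to null under '==').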
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+      // Only one side is a constant 1-character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
+ Condition is_smi = masm()->CheckSmi(left_reg);
+ is_not_string.Branch(is_smi, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(left_reg, HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality then make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+          __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
+ __ Cmp(left_reg, right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+          __ bind(&not_a_symbol);
+ }
+ // Call the compare stub if the left side is not a flat ascii string.
+ __ andb(temp.reg(),
+ Immediate(kIsNotStringMask |
+ kStringRepresentationMask |
+ kStringEncodingMask));
+ __ cmpb(temp.reg(),
+ Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
+
+      // Set up and call the compare stub.
+ is_not_string.Bind(&left_side);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_string.Bind(&left_side);
+ // left_side is a sequential ASCII string.
+ ASSERT(left_side.reg().is(left_reg));
+ right_side = Result(right_val);
+ Result temp2 = allocator_->Allocate();
+ ASSERT(temp2.is_valid());
+ // Test string equality and comparison.
+ if (cc == equal) {
+ Label comparison_done;
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ j(not_equal, &comparison_done);
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_val)->Get(0));
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ Immediate(char_value));
+ __ bind(&comparison_done);
+ } else {
+ __ movq(temp2.reg(),
+ FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
+ Label comparison;
+ // If the length is 0 then the subtraction gave -1 which compares less
+ // than any character.
+ __ j(negative, &comparison);
+ // Otherwise load the first character.
+ __ movzxbl(temp2.reg(),
+ FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+ __ bind(&comparison);
+ // Compare the first character of the string with the
+ // constant 1-character string.
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmpb(temp2.reg(), Immediate(char_value));
+ Label characters_were_different;
+ __ j(not_equal, &characters_were_different);
+ // If the first character is the same then the long string sorts after
+ // the short one.
+ __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Smi::FromInt(1));
+ __ bind(&characters_were_different);
+ }
+ temp2.Unuse();
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
+ } else {
+ // Neither side is a constant Smi, constant 1-char string, or constant null.
+ // If either side is a non-smi constant, or known to be a heap number,
+ // skip the smi check.
+ bool known_non_smi =
+ (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+ (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+ left_side.type_info().IsDouble() ||
+ right_side.type_info().IsDouble();
+
+ NaNInformation nan_info =
+ (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+ kBothCouldBeNaN :
+ kCantBothBeNaN;
+
+    // Inline number comparison, handling any combination of smis and heap
+    // numbers, if:
+    //   the code is in a loop,
+    //   the compare operation is different from equal, and
+    //   the compare is not a for-loop condition.
+    // The reason for excluding equal is that it will most likely be done
+    // with smis (not heap numbers) and the code for comparing smis is
+    // inlined separately. The same reasoning applies to for-loop
+    // conditions, which will also most likely be smi comparisons.
+ bool is_loop_condition = (node->AsExpression() != NULL)
+ && node->AsExpression()->is_loop_condition();
+ bool inline_number_compare =
+ loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
+ // Left and right needed in registers for the following code.
+ left_side.ToRegister();
+ right_side.ToRegister();
+
+ if (known_non_smi) {
+ // Inlined equality check:
+ // If at least one of the objects is not NaN, then if the objects
+ // are identical, they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
+
+ Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+ is_smi.Branch(both_smi);
+
+ // Inline the equality check if both operands can't be a NaN. If both
+ // objects are the same they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ SmiCompare(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
+ }
+}
+
+
+void CodeGenerator::ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* dest,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side->handle())->value();
+ int right_value = Smi::cast(*right_side->handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result* temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may re-introduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side->ToRegister();
+ Register left_reg = left_side->reg();
+ Smi* constant_smi = Smi::cast(*right_side->handle());
+
+ if (left_side->is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_reg);
+ }
+ // Test smi equality and comparison by signed int comparison.
+ // Both sides are smis, so we can use an Immediate.
+ __ SmiCompare(left_reg, constant_smi);
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ } else {
+      // Only the case remains where the left side might be a non-smi.
+ JumpTarget is_smi;
+ if (cc == equal) {
+ // We can do the equality comparison before the smi check.
+ __ SmiCompare(left_reg, constant_smi);
+ dest->true_target()->Branch(equal);
+ Condition left_is_smi = masm_->CheckSmi(left_reg);
+ dest->false_target()->Branch(left_is_smi);
+ } else {
+ // Do the smi check, then the comparison.
+ Condition left_is_smi = masm_->CheckSmi(left_reg);
+ is_smi.Branch(left_is_smi, left_side, right_side);
+ }
+
+ // Jump or fall through to here if we are comparing a non-smi to a
+ // constant smi. If the non-smi is a heap number and this is not
+ // a loop condition, inline the floating point code.
+ if (!is_loop_condition) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ JumpTarget not_number;
+ __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ not_number.Branch(not_equal, left_side);
+ __ movsd(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = constant_smi->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ movl(temp.reg(), Immediate(value));
+ __ cvtlsi2sd(xmm0, temp.reg());
+ temp.Unuse();
+ }
+ __ ucomisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, left_side);
+ left_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+ not_number.Bind(left_side);
+ }
+
+      // Set up and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, left_side, right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ if (cc == equal) {
+ dest->Split(cc);
+ } else {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+        // For performance it is important that this case comes last.
+ is_smi.Bind(left_side, right_side);
+ __ SmiCompare(left_reg, constant_smi);
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->Split(cc);
+ }
+ }
+ }
+}
+
+
+// Load a comparison operand into an XMM register. Jump to the not_numbers
+// jump target, passing the left and right result, if the operand is not a
+// number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+ Result* operand,
+ XMMRegister xmm_reg,
+ Result* left_side,
+ Result* right_side,
+ JumpTarget* not_numbers) {
+ Label done;
+ if (operand->type_info().IsDouble()) {
+ // Operand is known to be a heap number, just load it.
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ } else if (operand->type_info().IsSmi()) {
+ // Operand is known to be a smi. Convert it to double and keep the original
+ // smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ } else {
+ // Operand type not known, check for smi or heap number.
+ Label smi;
+ __ JumpIfSmi(operand->reg(), &smi);
+ if (!operand->type_info().IsNumber()) {
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ not_numbers->Branch(not_equal, left_side, right_side, taken);
+ }
+ __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&smi);
+    // Convert the smi to a double and keep the original smi.
+ __ SmiToInteger32(kScratchRegister, operand->reg());
+ __ cvtlsi2sd(xmm_reg, kScratchRegister);
+ __ jmp(&done);
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+ Result* right_side,
+ Condition cc,
+ ControlDestination* dest) {
+ ASSERT(left_side->is_register());
+ ASSERT(right_side->is_register());
+
+ JumpTarget not_numbers;
+ // Load left and right operand into registers xmm0 and xmm1 and compare.
+  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+                        &not_numbers);
+  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+                        &not_numbers);
+ __ ucomisd(xmm0, xmm1);
+ // Bail out if a NaN is involved.
+ not_numbers.Branch(parity_even, left_side, right_side);
+
+ // Split to destination targets based on comparison.
+ left_side->Unuse();
+ right_side->Unuse();
+ dest->true_target()->Branch(DoubleCondition(cc));
+ dest->false_target()->Jump();
+
+ not_numbers.Bind(left_side, right_side);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ frame_->SpillTop();
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop, flags);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
}
@@ -967,7 +2711,6 @@
void CodeGenerator::VisitAndSpill(Statement* statement) {
- // TODO(X64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Visit(statement);
@@ -979,6 +2722,9 @@
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(in_spilled_code());
set_in_spilled_code(false);
VisitStatements(statements);
@@ -986,14 +2732,20 @@
frame_->SpillAll();
}
set_in_spilled_code(true);
+
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
ASSERT(!in_spilled_code());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@@ -1010,6 +2762,21 @@
}
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(rsi); // The context is the first argument.
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
@@ -1214,6 +2981,7 @@
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
+ masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@@ -1230,6 +2998,45 @@
}
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+ // The return value is a live (but not currently reference counted)
+ // reference to rax. This is safe because the current frame does not
+ // contain a reference to rax (it is prepared for the return by spilling
+ // all registers).
+ if (FLAG_trace) {
+ frame_->Push(return_value);
+ *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+ return_value->ToRegister(rax);
+
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+
+ // Leave the frame and return popping the arguments and the
+ // receiver.
+ frame_->Exit();
+ masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+ DeleteFrame();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+ // with length 7 (3 + 1 + 3).
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ for (int i = 0; i < kPadding; ++i) {
+ masm_->int3();
+ }
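+  // The int3 padding guarantees there is room for the debugger to
+  // overwrite the whole sequence with a debug break call.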
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+}
+
+
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ WithEnterStatement");
@@ -1265,8 +3072,6 @@
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- // TODO(X64): This code is completely generic and should be moved somewhere
- // where it can be shared between architectures.
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
@@ -1558,8 +3363,8 @@
LoadCondition(node->cond(), &dest, true);
}
} else {
- // If we have chosen not to recompile the test at the
- // bottom, jump back to the one at the top.
+ // If we have chosen not to recompile the test at the bottom,
+ // jump back to the one at the top.
if (has_valid_frame()) {
node->continue_target()->Jump();
}
@@ -1665,49 +3470,56 @@
CodeForStatementPosition(node);
Slot* loop_var_slot = loop_var->slot();
if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(loop_var_slot->index());
+ frame_->TakeLocalAt(loop_var_slot->index());
} else {
ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->PushParameterAt(loop_var_slot->index());
+ frame_->TakeParameterAt(loop_var_slot->index());
}
Result loop_var_result = frame_->Pop();
if (!loop_var_result.is_register()) {
loop_var_result.ToRegister();
}
-
+ Register loop_var_reg = loop_var_result.reg();
+ frame_->Spill(loop_var_reg);
if (increments) {
- __ SmiAddConstant(loop_var_result.reg(),
- loop_var_result.reg(),
+ __ SmiAddConstant(loop_var_reg,
+ loop_var_reg,
Smi::FromInt(1));
} else {
- __ SmiSubConstant(loop_var_result.reg(),
- loop_var_result.reg(),
+ __ SmiSubConstant(loop_var_reg,
+ loop_var_reg,
Smi::FromInt(1));
}
- {
- __ SmiCompare(loop_var_result.reg(), limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
+ frame_->Push(&loop_var_result);
+ if (loop_var_slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(loop_var_slot->index());
+ } else {
+ ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+ frame_->StoreToParameterAt(loop_var_slot->index());
}
- loop_var_result.Unuse();
+ frame_->Drop();
+
+ __ SmiCompare(loop_var_reg, limit_value);
+ Condition condition;
+ switch (compare_op) {
+ case Token::LT:
+ condition = less;
+ break;
+ case Token::LTE:
+ condition = less_equal;
+ break;
+ case Token::GT:
+ condition = greater;
+ break;
+ case Token::GTE:
+ condition = greater_equal;
+ break;
+ default:
+ condition = never;
+ UNREACHABLE();
+ }
+ loop.Branch(condition);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -2114,6 +3926,7 @@
node->break_target()->Unuse();
}
+
void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
@@ -2531,6 +4344,349 @@
}
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
+
+ // Generate fast case for loading from slots that correspond to
+ // local/global variables or arguments unless they are shadowed by
+ // eval-introduced bindings.
+ EmitDynamicLoadFromSlotFastCase(slot,
+ typeof_state,
+ &value,
+ &slow,
+ &done);
+
+ slow.Bind();
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ if (typeof_state == INSIDE_TYPEOF) {
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
+
+ done.Bind(&value);
+ frame_->Push(&value);
+
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
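+    // For example, in 'f(); const x = 1;' a call of f() that reads x
+    // before the initialization runs sees the hole and must observe
+    // 'undefined'.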
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit;
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
+ exit.Bind();
+ frame_->EmitPush(rcx);
+
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
+
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+
+ } else {
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
+ }
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+ // If the loaded value is a constant, we know if the arguments
+ // object has been lazily loaded yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't loaded the arguments object yet, we
+ // need to do it now.
+ JumpTarget exit;
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = rsi;
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(tmp.reg())) {
+ __ movq(tmp.reg(), context);
+ }
+ // Load map for comparison into register, outside loop.
+ __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+ __ bind(&next);
+ // Terminate at global context.
+ __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+ __ j(equal, &fast);
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal);
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+ tmp.Unuse();
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ LoadGlobal();
+ frame_->Push(slot->var()->name());
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
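+  // Inside typeof the load must not be contextual: a contextual global
+  // load throws a ReferenceError for a missing variable, while
+  // 'typeof undeclared' must yield 'undefined' instead of throwing.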
+ Result answer = frame_->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test rax
+ // instruction here.
+ masm_->nop();
+ return answer;
+}
+
+
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ Result* result,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ done->Jump(result);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ *result = allocator_->Allocate();
+ ASSERT(result->is_valid());
+ __ movq(result->reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ *result,
+ slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
+ done->Branch(not_equal, result);
+ __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
+ }
+ done->Jump(result);
+ } else if (rewrite != NULL) {
+ // Generate fast case for argument loads.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ Result arguments = allocator()->Allocate();
+ ASSERT(arguments.is_valid());
+ __ movq(arguments.reg(),
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+ arguments,
+ slow));
+ frame_->Push(&arguments);
+ frame_->Push(key_literal->handle());
+ *result = EmitKeyedLoad();
+ done->Jump(result);
+ }
+ }
+ }
+ }
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(slot->var()->name());
+
+ Result value;
+ if (init_state == CONST_INIT) {
+      // Same as the case for a normal store, but ignores the attributes
+      // (e.g. READ_ONLY) of the context slot so that we can initialize const
+      // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+      // Note that we must declare foo upon entry of eval(), via a
+      // context slot declaration, but we cannot initialize it at the same
+      // time, because the const declaration may be at the end of the eval
+      // code (sigh...) and the const variable may have been used before
+      // (where its value is 'undefined'). Thus, we can only do the
+      // initialization when we actually encounter the expression and when
+      // the expression operands are defined and valid, hence the split
+      // into two operations: declaration of the context slot followed
+      // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Init const");
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
+ exit.Branch(not_equal);
+ }
+
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because const declarations initialize consts to
+    // 'the hole' value and, by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ movq(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
+
+ exit.Bind();
+ }
+}
+
+
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
@@ -2557,6 +4713,17 @@
}
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+ UNIMPLEMENTED();
+ // TODO(X64): Implement security policy for loads of smis.
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ return false;
+}
+
+
// Materialize the regexp literal 'node' in the literals array
// 'literals' of the function. Leave the regexp boilerplate in
// 'boilerplate'.
@@ -3245,6 +5412,1356 @@
}
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ value.Unuse();
+ destination()->Split(is_smi);
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ Load(args->at(1));
+ Load(args->at(2));
+ frame_->CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
+ value.Unuse();
+ destination()->Split(positive_smi);
+}
+
+
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a smi in order to trigger conversion.
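+// For example, 'abc'.charCodeAt(1) yields 98, while an out-of-range
+// index yields NaN (handled by the deferred code above).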
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharCodeAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need two extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
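+// For example, String.fromCharCode(65) yields 'A'.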
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharFromCode");
+ ASSERT(args->length() == 1);
+
+ Load(args->at(0));
+
+ Result code = frame_->Pop();
+ code.ToRegister();
+ ASSERT(code.is_valid());
+
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result_, Smi::FromInt(0));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
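+// For example, 'abc'.charAt(1) yields 'b', while an out-of-range index
+// yields the empty string (handled by the deferred code above).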
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a JS array or not.
+ __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // It is a heap object - get map.
+ // Check if the object is a regexp.
+ __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
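+  // Note that typeof null is 'object', so null is special-cased to true
+  // below.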
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+
+ __ Move(kScratchRegister, Factory::null_value());
+ __ cmpq(obj.reg(), kScratchRegister);
+ destination()->true_target()->Branch(equal);
+
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ movzxbq(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ destination()->false_target()->Branch(below);
+ __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ obj.Unuse();
+ destination()->Split(below_equal);
+}
+
+
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
+  // typeof(arg) === 'function').
+ // It includes undetectable objects (as opposed to IsObject).
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
+ // Check that this is an object.
+ __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(above_equal);
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ obj.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
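+  // Distinguishes 'new F()' from a plain call 'F()' by inspecting the
+  // calling frame's marker.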
+
+ // Get the frame pointer for the calling frame.
+ Result fp = allocator()->Allocate();
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+ fp.Unuse();
+ destination()->Split(equal);
+}
+
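+// The frame walk above in C++ terms (a sketch; the Memory::Address_at and
+// Memory::Object_at helpers are assumptions):
+//
+//   Address fp = Memory::Address_at(rbp + kCallerFPOffset);
+//   if (Memory::Object_at(fp + kContextOffset) ==
+//       Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)) {
+//     fp = Memory::Address_at(fp + kCallerFPOffset);  // skip adaptor frame
+//   }
+//   return Memory::Object_at(fp + kMarkerOffset) ==
+//          Smi::FromInt(StackFrame::CONSTRUCT);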
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Result fp = allocator_->Allocate();
+ Result result = allocator_->Allocate();
+ ASSERT(fp.is_valid() && result.is_valid());
+
+ Label exit;
+
+ // Get the number of formal parameters.
+ __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ movq(result.reg(),
+ Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ result.set_type_info(TypeInfo::Smi());
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(result.reg());
+ }
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
+ Load(args->at(0)); // Load the object.
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
+
+ // If the object is a smi, we return null.
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ null.Branch(is_smi);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(below);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
+
+ // Check if the constructor in the map is a function.
+ __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ non_function_constructor.Branch(not_equal);
+
+ // The obj register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(),
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
+
+ // All done.
+ leave.Bind();
+}
+
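+// The classification above as a sketch (accessor names are assumptions):
+//
+//   if (obj->IsSmi() || instance_type < FIRST_JS_OBJECT_TYPE) return null;
+//   if (instance_type == JS_FUNCTION_TYPE) return "Function";
+//   Object* ctor = map->constructor();
+//   if (!ctor->IsJSFunction()) return "Object";
+//   return JSFunction::cast(ctor)->shared()->instance_class_name();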
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
+ // if (object->IsSmi()) return object.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi);
+ // It is a heap object - get map.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ // if (!object->IsJSValue()) return object.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+ leave.Branch(not_equal);
+ __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
+}
+
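+// Equivalent logic as a one-line sketch:
+//
+//   return object->IsJSValue() ? JSValue::cast(object)->value() : object;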
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
+ Result value = frame_->Pop();
+ Result object = frame_->Pop();
+ value.ToRegister();
+ object.ToRegister();
+
+ // if (object->IsSmi()) return value.
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi, &value);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ // if (!object->IsJSValue()) return value.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+ leave.Branch(not_equal, &value);
+
+ // Store the value.
+ __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ movq(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
+
+ // Leave.
+ leave.Bind(&value);
+ frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ Load(args->at(0));
+ Result key = frame_->Pop();
+ // Explicitly create a constant result.
+ Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ Result result = frame_->CallStub(&stub, &key, &count);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ Load(args->at(0));
+ Load(args->at(1));
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+ right.ToRegister();
+ left.ToRegister();
+ __ cmpq(right.reg(), left.reg());
+ right.Unuse();
+ left.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ // The RBP value is aligned, so it carries a valid smi tag (though it does
+ // not encode a meaningful smi, so it must not be treated as one).
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ Result rbp_as_smi = allocator_->Allocate();
+ ASSERT(rbp_as_smi.is_valid());
+ __ movq(rbp_as_smi.reg(), rbp);
+ frame_->Push(&rbp_as_smi);
+}
+
+
+void CodeGenerator::GenerateRandomHeapNumber(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ frame_->SpillAll();
+
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+ __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rbx, rax);
+
+ __ bind(&heapnumber_allocated);
+
+ // Return a random uint32 number in rax.
+ // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
+ // by computing:
+ //   (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
+ __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm1, rcx);
+ __ movd(xmm0, rax);
+ __ cvtss2sd(xmm1, xmm1);
+ __ xorpd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+
+ __ movq(rax, rbx);
+ Result result = allocator_->Allocate(rax);
+ frame_->Push(&result);
+}
+
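+// The bit trick above in host C++ (a sketch; assumes IEEE-754 doubles and
+// the 32 random bits in |random|):
+//
+//   uint64_t bits = 0x4130000000000000ULL;  // 1.0 x 2^20 as a double
+//   bits ^= random;                 // 32 random bits into the low mantissa
+//   double d;
+//   memcpy(&d, &bits, sizeof(d));   // d == 1.(20 zeros)(random bits) x 2^20
+//   double result = d - 1048576.0;  // subtract 2^20, leaving [0, 1)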
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
+
+ // Load the arguments on the stack and call the runtime system.
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+ // No stub. This code only occurs a few times in regexp.js.
+ const int kMaxInlineLength = 100;
+ ASSERT_EQ(3, args->length());
+ Load(args->at(0)); // Size of array, smi.
+ Load(args->at(1)); // "index" property value.
+ Load(args->at(2)); // "input" property value.
+ {
+ VirtualFrame::SpilledScope spilled_scope;
+
+ Label slowcase;
+ Label done;
+ __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ JumpIfNotSmi(r8, &slowcase);
+ __ SmiToInteger32(rbx, r8);
+ __ cmpl(rbx, Immediate(kMaxInlineLength));
+ __ j(above, &slowcase);
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ // Allocate RegExpResult followed by FixedArray with size in rbx.
+ // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
+ // Elements: [Map][Length][..elements..]
+ __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+ times_pointer_size,
+ rbx, // In: Number of elements.
+ rax, // Out: Start of allocation (tagged).
+ rcx, // Out: End of allocation.
+ rdx, // Scratch register
+ &slowcase,
+ TAG_OBJECT);
+ // rax: Start of allocated area, object-tagged.
+ // rbx: Number of array elements as int32.
+ // r8: Number of array elements as smi.
+
+ // Set JSArray map to global.regexp_result_map().
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+
+ // Set empty properties FixedArray.
+ __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+ Factory::empty_fixed_array());
+
+ // Set elements to point to FixedArray allocated right after the JSArray.
+ __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+ // Set input, index and length fields from arguments.
+ __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
+ __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
+ __ lea(rsp, Operand(rsp, kPointerSize));
+ __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+
+ // Fill out the elements FixedArray.
+ // rax: JSArray.
+ // rcx: FixedArray.
+ // rbx: Number of elements in array as int32.
+
+ // Set map.
+ __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ // Set length.
+ __ Integer32ToSmi(rdx, rbx);
+ __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+ // Fill contents of fixed-array with the-hole.
+ __ Move(rdx, Factory::the_hole_value());
+ __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
+ // Fill fixed array elements with hole.
+ // rax: JSArray.
+ // rbx: Number of elements in array that remains to be filled, as int32.
+ // rcx: Start of elements in FixedArray.
+ // rdx: the hole.
+ Label loop;
+ __ testl(rbx, rbx);
+ __ bind(&loop);
+ __ j(less_equal, &done); // Jump if rbx is negative or zero.
+ __ subl(rbx, Immediate(1));
+ __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+ __ jmp(&loop);
+
+ __ bind(&slowcase);
+ __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+
+ __ bind(&done);
+ }
+ frame_->Forget(3);
+ frame_->Push(rax);
+}
+
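+// The fill loop above, as a sketch: the elements array is filled with the
+// hole from the end down, i.e. while (rbx > 0) elements[--rbx] = the_hole;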
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+ DeferredSearchCache(Register dst,
+ Register cache,
+ Register key,
+ Register scratch)
+ : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
+ set_comment("[ DeferredSearchCache");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_; // on invocation holds the finger index (as int32);
+ // on exit holds the value being looked up.
+ Register cache_; // instance of JSFunctionResultCache.
+ Register key_; // key being looked up.
+ Register scratch_;
+};
+
+
+// Returns the Operand of the element at |index| + |additional_offset| in
+// the FixedArray whose pointer is held in |array|. |index| is an int32.
+static Operand ArrayElement(Register array,
+ Register index,
+ int additional_offset = 0) {
+ int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+ return FieldOperand(array, index, times_pointer_size, offset);
+}
+
+
+void DeferredSearchCache::Generate() {
+ Label first_loop, search_further, second_loop, cache_miss;
+
+ Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
+ Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
+
+ // Check the cache from finger to start of the cache.
+ __ bind(&first_loop);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, kEntriesIndexImm);
+ __ j(less, &search_further);
+
+ __ cmpq(ArrayElement(cache_, dst_), key_);
+ __ j(not_equal, &first_loop);
+
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ movq(dst_, ArrayElement(cache_, dst_, 1));
+ __ jmp(exit_label());
+
+ __ bind(&search_further);
+
+ // Check the cache from end of cache up to finger.
+ __ SmiToInteger32(dst_,
+ FieldOperand(cache_,
+ JSFunctionResultCache::kCacheSizeOffset));
+ __ SmiToInteger32(scratch_,
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
+
+ __ bind(&second_loop);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, scratch_);
+ __ j(less_equal, &cache_miss);
+
+ __ cmpq(ArrayElement(cache_, dst_), key_);
+ __ j(not_equal, &second_loop);
+
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
+ __ movq(dst_, ArrayElement(cache_, dst_, 1));
+ __ jmp(exit_label());
+
+ __ bind(&cache_miss);
+ __ push(cache_); // store a reference to cache
+ __ push(key_); // store a key
+ __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ push(key_);
+ // On x64 function must be in rdi.
+ __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
+ ParameterCount expected(1);
+ __ InvokeFunction(rdi, expected, CALL_FUNCTION);
+
+ // Find a place to put the new cached value.
+ Label add_new_entry, update_cache;
+ __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
+ // Possible optimization: the cache size is constant for a given cache, so
+ // technically we could use a constant here. However, on a cache miss this
+ // optimization would hardly matter.
+
+ // Check whether we can add a new entry to the cache.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(r9,
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+ __ cmpl(rbx, r9);
+ __ j(greater, &add_new_entry);
+
+ // Check whether we can evict the entry after the finger.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ addl(rdx, kEntrySizeImm);
+ Label forward;
+ __ cmpl(rbx, rdx);
+ __ j(greater, &forward);
+ // Need to wrap over the cache.
+ __ movl(rdx, kEntriesIndexImm);
+ __ bind(&forward);
+ __ movl(r9, rdx);
+ __ jmp(&update_cache);
+
+ __ bind(&add_new_entry);
+ // r9 holds cache size as int32.
+ __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+
+ // Update the cache itself.
+ // r9 holds the index as int32.
+ __ bind(&update_cache);
+ __ pop(rbx); // restore the key
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+ // Store key.
+ __ movq(ArrayElement(rcx, r9), rbx);
+ __ RecordWrite(rcx, 0, rbx, r9);
+
+ // Store value.
+ __ pop(rcx); // restore the cache.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ incl(rdx);
+ // Back up rax, because the RecordWrite macro clobbers its arguments.
+ __ movq(rbx, rax);
+ __ movq(ArrayElement(rcx, rdx), rax);
+ __ RecordWrite(rcx, 0, rbx, rdx);
+
+ if (!dst_.is(rax)) {
+ __ movq(dst_, rax);
+ }
+}
+
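+// The two search loops above implement, roughly (a sketch; |finger| and
+// |size| are int32 element indices, and the finger is updated on a hit):
+//
+//   for (int i = finger - kEntrySize; i >= kEntriesIndex; i -= kEntrySize)
+//     if (cache->get(i) == key) return cache->get(i + 1);
+//   for (int i = size - kEntrySize; i > finger; i -= kEntrySize)
+//     if (cache->get(i) == key) return cache->get(i + 1);
+//   // Cache miss: call the factory, then add or evict an entry.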
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ Top::global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ frame_->Push(Factory::undefined_value());
+ return;
+ }
+
+ Load(args->at(1));
+ Result key = frame_->Pop();
+ key.ToRegister();
+
+ Result cache = allocator()->Allocate();
+ ASSERT(cache.is_valid());
+ __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(cache.reg(),
+ FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
+ __ movq(cache.reg(),
+ ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ movq(cache.reg(),
+ FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
+
+ Result tmp = allocator()->Allocate();
+ ASSERT(tmp.is_valid());
+
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
+ cache.reg(),
+ key.reg(),
+ scratch.reg());
+
+ const int kFingerOffset =
+ FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
+ // Load the finger (stored as a smi) into tmp.reg() as an int32 index.
+ __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
+ __ cmpq(key.reg(), FieldOperand(cache.reg(),
+ tmp.reg(), times_pointer_size,
+ FixedArray::kHeaderSize));
+ deferred->Branch(not_equal);
+ __ movq(tmp.reg(), FieldOperand(cache.reg(),
+ tmp.reg(), times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+
+ deferred->BindExit();
+ frame_->Push(&tmp);
+}
+
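+// Only the entry under the finger is checked inline; everything else falls
+// through to DeferredSearchCache. Roughly (a sketch):
+//
+//   int finger = Smi::cast(cache->get(kFingerIndex))->value();
+//   Object* result = (cache->get(finger) == key)
+//       ? cache->get(finger + 1)    // hit under the finger
+//       : SearchCache(cache, key);  // deferred slow search (hypothetical)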
+
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and jump to the runtime.
+ Load(args->at(0));
+
+ NumberToStringStub stub;
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result index2 = frame_->Pop();
+ index2.ToRegister();
+
+ Result index1 = frame_->Pop();
+ index1.ToRegister();
+
+ Result object = frame_->Pop();
+ object.ToRegister();
+
+ Result tmp1 = allocator()->Allocate();
+ tmp1.ToRegister();
+ Result tmp2 = allocator()->Allocate();
+ tmp2.ToRegister();
+
+ frame_->Spill(object.reg());
+ frame_->Spill(index1.reg());
+ frame_->Spill(index2.reg());
+
+ DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+ index1.reg(),
+ index2.reg());
+
+ // Fetch the map and check that the array is in the fast case.
+ // Check that the object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+ deferred->Branch(below);
+ __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+ Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(not_zero);
+
+ // Check that the object's elements are in the fast case.
+ __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ deferred->Branch(not_equal);
+
+ // Check that both indices are smis.
+ Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
+ deferred->Branch(NegateCondition(both_smi));
+
+ // Bring addresses into index1 and index2.
+ __ SmiToInteger32(index1.reg(), index1.reg());
+ __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+ index1.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(index2.reg(), index2.reg());
+ __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+ index2.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Swap elements.
+ __ movq(object.reg(), Operand(index1.reg(), 0));
+ __ movq(tmp2.reg(), Operand(index2.reg(), 0));
+ __ movq(Operand(index2.reg(), 0), object.reg());
+ __ movq(Operand(index1.reg(), 0), tmp2.reg());
+
+ Label done;
+ __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+ // Possible optimization: check that both values are smis
+ // (or them together and test against the smi mask).
+
+ __ movq(tmp2.reg(), tmp1.reg());
+ RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+ __ CallStub(&recordWrite2);
+
+ __ bind(&done);
+
+ deferred->BindExit();
+ frame_->Push(Factory::undefined_value());
+}
+
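+// The fast path above performs, in effect (a sketch; guard checks and
+// write barriers elided):
+//
+//   FixedArray* elements = FixedArray::cast(object->elements());
+//   Object* tmp = elements->get(index1);
+//   elements->set(index1, elements->get(index2));
+//   elements->set(index2, tmp);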
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateCallFunction");
+
+ ASSERT(args->length() >= 2);
+
+ int n_args = args->length() - 2; // for receiver and function.
+ Load(args->at(0)); // receiver
+ for (int i = 0; i < n_args; i++) {
+ Load(args->at(i + 1));
+ }
+ Load(args->at(n_args + 1)); // function
+ Result result = frame_->CallJSFunction(n_args);
+ frame_->Push(&result);
+}
+
+
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ Load(args->at(0));
+ Load(args->at(1));
+
+ Label allocate_return;
+ // Load the two operands while leaving the values on the frame.
+ frame()->Dup();
+ Result exponent = frame()->Pop();
+ exponent.ToRegister();
+ frame()->Spill(exponent.reg());
+ frame()->PushElementAt(1);
+ Result base = frame()->Pop();
+ base.ToRegister();
+ frame()->Spill(base.reg());
+
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ ASSERT(!exponent.reg().is(base.reg()));
+ JumpTarget call_runtime;
+
+ // Save 1 in xmm3 - we need this several times later on.
+ __ movl(answer.reg(), Immediate(1));
+ __ cvtlsi2sd(xmm3, answer.reg());
+
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+ // If the exponent is a heap number go to that specific case.
+ __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
+ __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+
+ // Optimized version when the exponent is an integer.
+ Label powi;
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&powi);
+ // The exponent is a smi and the base is a heap number.
+ __ bind(&base_nonsmi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // Optimized version of pow when the exponent is an integer.
+ __ bind(&powi);
+ __ SmiToInteger32(exponent.reg(), exponent.reg());
+
+ // Save exponent in base as we need to check if exponent is negative later.
+ // We know that base and exponent are in different registers.
+ __ movl(base.reg(), exponent.reg());
+
+ // Get absolute value of exponent.
+ Label no_neg;
+ __ cmpl(exponent.reg(), Immediate(0));
+ __ j(greater_equal, &no_neg);
+ __ negl(exponent.reg());
+ __ bind(&no_neg);
+
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
+
+ __ bind(&while_true);
+ __ shrl(exponent.reg(), Immediate(1));
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ testl(exponent.reg(), exponent.reg());
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
+
+ // base.reg() holds the original exponent; if it is negative,
+ // return 1/result.
+ __ testl(base.reg(), base.reg());
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ movl(answer.reg(), Immediate(0x7FB00000));
+ __ movd(xmm0, answer.reg());
+ __ cvtss2sd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ call_runtime.Branch(equal);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
+
+ // The exponent (or both operands) is a heap number; either way we now
+ // work on doubles.
+ __ bind(&exponent_nonsmi);
+ __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+ // Test if the exponent is NaN.
+ __ ucomisd(xmm1, xmm1);
+ call_runtime.Branch(parity_even);
+
+ Label base_not_smi;
+ Label handle_special_cases;
+ __ JumpIfNotSmi(base.reg(), &base_not_smi);
+ __ SmiToInteger32(base.reg(), base.reg());
+ __ cvtlsi2sd(xmm0, base.reg());
+ __ jmp(&handle_special_cases);
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ call_runtime.Branch(not_equal);
+ __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+ __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ call_runtime.Branch(greater_equal);
+ __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ Label not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movl(answer.reg(), Immediate(0xBF000000));
+ __ movd(xmm2, answer.reg());
+ __ cvtss2sd(xmm2, xmm2);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
+
+ // Calculate the reciprocal of the square root.
+ // Note that 1/sqrt(x) = sqrt(1/x).
+ __ divsd(xmm3, xmm0);
+ __ movsd(xmm1, xmm3);
+ __ sqrtsd(xmm1, xmm1);
+ __ jmp(&allocate_return);
+
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ ucomisd(xmm2, xmm1);
+ call_runtime.Branch(not_equal);
+
+ // Calculate the square root.
+ __ movsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
+
+ JumpTarget done;
+ Label failure, success;
+ __ bind(&allocate_return);
+ // Make a copy of the frame to enable us to handle allocation
+ // failure after the JumpTarget jump.
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+ __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+ // Remove the two original values from the frame - we only need those
+ // in the case where we branch to runtime.
+ frame()->Drop(2);
+ exponent.Unuse();
+ base.Unuse();
+ done.Jump(&answer);
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ // If we experience an allocation failure we branch to runtime.
+ __ bind(&failure);
+ call_runtime.Bind();
+ answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+
+ done.Bind(&answer);
+ frame()->Push(&answer);
+}
+
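+// The powi loop above is binary exponentiation; as a sketch in plain C++
+// (the exponent has already been made non-negative):
+//
+//   double powi(double base, uint32_t exp) {
+//     double result = 1.0;            // xmm1
+//     while (exp != 0) {
+//       if (exp & 1) result *= base;  // multiply when shrl sets the carry
+//       exp >>= 1;
+//       base *= base;                 // xmm0 is squared each round
+//     }
+//     return result;
+//   }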
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+ Load(args->at(0));
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
+ Result result = frame_->CallStub(&stub, 1);
+ frame_->Push(&result);
+}
+
+
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the callsite has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+
+ // Leave original value on the frame if we need to call runtime.
+ frame()->Dup();
+ Result result = frame()->Pop();
+ result.ToRegister();
+ frame()->Spill(result.reg());
+ Label runtime;
+ Label non_smi;
+ Label load_done;
+ JumpTarget end;
+
+ __ JumpIfNotSmi(result.reg(), &non_smi);
+ __ SmiToInteger32(result.reg(), result.reg());
+ __ cvtlsi2sd(xmm0, result.reg());
+ __ jmp(&load_done);
+ __ bind(&non_smi);
+ __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &runtime);
+ __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+
+ __ bind(&load_done);
+ __ sqrtsd(xmm0, xmm0);
+ // A copy of the virtual frame to allow us to go to runtime after the
+ // JumpTarget jump.
+ Result scratch = allocator()->Allocate();
+ VirtualFrame* clone = new VirtualFrame(frame());
+ __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
+
+ __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+ frame()->Drop(1);
+ scratch.Unuse();
+ end.Jump(&result);
+ // We only branch to runtime if we have an allocation error.
+ // Use the copy of the original frame as our current frame.
+ RegisterFile empty_regs;
+ SetFrame(clone, &empty_regs);
+ __ bind(&runtime);
+ result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+
+ end.Bind(&result);
+ frame()->Push(&result);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -3815,6 +7332,10 @@
}
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->PushFunction();
+}
+
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
@@ -3979,2880 +7500,21 @@
}
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
-
- __ Move(kScratchRegister, Factory::null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
-
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result_, Smi::FromInt(0));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
- value.Unuse();
- destination()->Split(positive_smi);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
-
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // x has the original value of y - if y is negative return 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x))
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
- __ sqrtsd(xmm1, xmm1);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
-
- // Calculates square root.
- __ movsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmpq(right.reg(), left.reg());
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- // RBP value is aligned, so it should be tagged as a smi (without necesarily
- // being padded as a smi, so it should not be treated as a smi.).
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- Result rbp_as_smi = allocator_->Allocate();
- ASSERT(rbp_as_smi.is_valid());
- __ movq(rbp_as_smi.reg(), rbp);
- frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- Result result = allocator_->Allocate(rax);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 4);
-
- // Load the arguments on the stack and call the runtime system.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- // No stub. This code only occurs a few times in regexp.js.
- const int kMaxInlineLength = 100;
- ASSERT_EQ(3, args->length());
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- {
- VirtualFrame::SpilledScope spilled_scope;
-
- Label slowcase;
- Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 2));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
- Factory::empty_fixed_array());
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
- __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
- __ lea(rsp, Operand(rsp, kPointerSize));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ Move(rdx, Factory::the_hole_value());
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: the hole.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&slowcase);
- __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
- __ bind(&done);
- }
- frame_->Forget(3);
- frame_->Push(rax);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst,
- Register cache,
- Register key,
- Register scratch)
- : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_; // on invocation index of finger (as int32), on exit
- // holds value being looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
- Register scratch_;
-};
-
-
-// Returns the operand of the element at |index| + |additional_offset| in
-// the FixedArray whose pointer is held in |array|. |index| is an int32.
-static Operand ArrayElement(Register array,
- Register index,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index, times_pointer_size, offset);
-}
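-
-
-// A minimal sketch of the effective address ArrayElement produces
-// (illustrative; FieldOperand folds in the usual heap-object tag):
-//
-//   address = array - kHeapObjectTag + FixedArray::kHeaderSize
-//             + (index + additional_offset) * kPointerSize
-//
-// so ArrayElement(cache, i) is the key slot of a cache entry and
-// ArrayElement(cache, i, 1) is the value slot immediately after it.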
-
-
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
- Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, kEntriesIndexImm);
- __ j(less, &search_further);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &first_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ SmiToInteger32(dst_,
- FieldOperand(cache_,
- JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(scratch_,
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
- __ bind(&second_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, scratch_);
- __ j(less_equal, &cache_miss);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &second_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On x64 function must be in rdi.
- __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
-  // Find a place to put the new cached value.
- Label add_new_entry, update_cache;
- __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
-  // Possible optimization: the cache size is constant for the given
-  // cache, so technically we could use a constant here. However, on a
-  // cache miss this optimization would hardly matter.
-
-  // Check whether we can add a new entry to the cache.
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ SmiToInteger32(r9,
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ cmpl(rbx, r9);
- __ j(greater, &add_new_entry);
-
-  // Check whether we can evict the entry after the finger.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ addl(rdx, kEntrySizeImm);
- Label forward;
- __ cmpl(rbx, rdx);
- __ j(greater, &forward);
-  // Need to wrap around to the start of the cache.
- __ movl(rdx, kEntriesIndexImm);
- __ bind(&forward);
- __ movl(r9, rdx);
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- // r9 holds cache size as int32.
- __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
- // Update the cache itself.
- // r9 holds the index as int32.
- __ bind(&update_cache);
- __ pop(rbx); // restore the key
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
- // Store key.
- __ movq(ArrayElement(rcx, r9), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
-
- // Store value.
- __ pop(rcx); // restore the cache.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ incl(rdx);
-  // Back up rax, because the RecordWrite macro clobbers its arguments.
- __ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rax);
- __ RecordWrite(rcx, 0, rbx, rdx);
-
- if (!dst_.is(rax)) {
- __ movq(dst_, rax);
- }
-}
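-
-
-// Sketch of the JSFunctionResultCache layout the code above assumes,
-// reconstructed from the offsets it uses (illustrative only):
-//
-//   [0 .. kEntriesIndex-1]  bookkeeping: factory, finger, cache size, ...
-//   [kEntriesIndex .. ]     key0, value0, key1, value1, ...  (kEntrySize == 2)
-//
-// The finger marks the most recently hit entry. Lookup scans from the
-// finger down to kEntriesIndex, then from the end of the cache down to the
-// finger; only if both passes miss is the factory invoked, after which the
-// new pair is appended while the cache has room, or evicts the entry after
-// the finger (wrapping around to the first entry) once it is full.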
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ movq(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg(),
- scratch.reg());
-
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  // Load the cache finger into tmp.reg() as an int32 index.
-  __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmpq(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize));
- deferred->Branch(not_equal);
- __ movq(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case.
- __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- deferred->Branch(not_equal);
-
- // Check that both indices are smis.
- Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
- deferred->Branch(NegateCondition(both_smi));
-
- // Bring addresses into index1 and index2.
- __ SmiToInteger32(index1.reg(), index1.reg());
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(index2.reg(), index2.reg());
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Swap elements.
- __ movq(object.reg(), Operand(index1.reg(), 0));
- __ movq(tmp2.reg(), Operand(index2.reg(), 0));
- __ movq(Operand(index2.reg(), 0), object.reg());
- __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis
-  // (bitwise OR them together and test against the smi tag mask).
-
- __ movq(tmp2.reg(), tmp1.reg());
- RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
- __ CallStub(&recordWrite1);
-
- RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
- __ CallStub(&recordWrite2);
-
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
-
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
-
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
-
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation of Expressions
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- // TODO(x64): No architecture specific code. Move to shared location.
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
- int original_height = frame_->height();
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+ && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+ && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+ && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+ && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+ && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+ && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+ && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+ && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+ && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
+}
#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
- }
-
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* x,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- // TODO(X64): Make control flow to control destinations work.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
- } else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
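-
-
-// For reference, the ECMA-262 section 9.2 mapping the fast paths above
-// implement (a summary of the emitted checks, not emitted code):
-//
-//   false, undefined          -> false         true -> true
-//   Smi::FromInt(0)           -> false         any other smi -> true
-//   known number x            -> x != 0 && !isnan(x)
-//   everything else (strings, null, objects) -> left to ToBooleanStub.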
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
-  // that is, when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However, it is safe to dereference
-      // because the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
-}
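-
-
-// Illustrative trace of the chain walk above, for a context slot two
-// function scopes out (chain_length == 2):
-//
-//   tmp = rsi[Context::CLOSURE_INDEX];  tmp = tmp->context();   // hop 1
-//   tmp = tmp[Context::CLOSURE_INDEX];  tmp = tmp->context();   // hop 2
-//   tmp = tmp[Context::FCONTEXT_INDEX];  // strip a possible 'with' context
-//   return tmp[index];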
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
-
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsTheHole()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
- }
-
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
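-
-
-// The sentinel protocol used above, spelled out (illustrative):
-//
-//   loaded value == the_hole  =>  the arguments object has not been
-//                                 materialized yet; build it now with
-//                                 StoreArgumentsObject(false);
-//   anything else             =>  an already-built arguments object, or a
-//                                 user value shadowing it; use it as-is.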
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
-
- Result value;
- if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores the attribute
-      // (e.g. READ_ONLY) of the context slot so that we can initialize
-      // const properties (introduced via eval("const foo = (some expr);")).
-      // Also, uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
- }
-
- exit.Bind();
- }
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
-    // Walk up the context chain. There is no frame effect, so it is
-    // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
-}
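-
-
-// A note on the trailing nop (restating the comment above, no new
-// encoding details): the inline-cache machinery inspects the instruction
-// that follows a load-IC call site, and a "test rax" there marks an
-// inlined in-object property load eligible for patching. The nop
-// guarantees this call site can never be mistaken for one.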
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
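-
-
-// Summary of the fast cases generated above (restating the control flow,
-// nothing new is emitted):
-//   DYNAMIC_GLOBAL: a global load IC, valid once every intervening context
-//     is known to have no extension object;
-//   DYNAMIC_LOCAL rewriting to a slot: a direct context-slot load, with
-//     the hole converted to undefined for consts;
-//   DYNAMIC_LOCAL rewriting to arguments[i]: a keyed load on the arguments
-//     object.
-// Anything else falls through to the runtime call bound at |slow|.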
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObject());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObject());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObject());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the hole value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::the_hole_value());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments()->var();
- Variable* shadow = scope()->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has been
- // assigned a proper value.
- skip_arguments = !probe.handle()->IsTheHole();
- } else {
- __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- return frame_->Pop();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->slot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
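-
-
-// Why the unsigned conditions are correct (illustrative): ucomisd sets
-// EFLAGS the way an unsigned integer compare would, with CF meaning
-// "below", and it does not set SF/OF meaningfully, so the signed
-// conditions must be translated:
-//
-//   less          -> below         (CF == 1)
-//   less_equal    -> below_equal   (CF == 1 or ZF == 1)
-//   greater       -> above         (CF == 0 and ZF == 0)
-//   greater_equal -> above_equal   (CF == 0)
-//   equal stays equal              (ZF == 1)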
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
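-
-  // Worked example of the reversal above (illustrative): for "a > b" the
-  // operands are popped in swapped order and cc becomes less, so the code
-  // below evaluates "b < a", which matches the ECMA-262 definition of '>'
-  // while the operands were still evaluated left to right.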
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side.handle())->value();
- int right_value = Smi::cast(*right_side.handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side.ToRegister();
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
-
- if (left_side.is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_side.reg());
- }
- } else {
- Condition left_is_smi = masm_->CheckSmi(left_side.reg());
- is_smi.Branch(left_is_smi);
-
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- if (!is_loop_condition && right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- not_number.Branch(not_equal, &left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
-
-      // Set up and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- }
-
- left_side = Result(left_reg);
- right_side = Result(right_val);
- // Test smi equality and comparison by signed int comparison.
- // Both sides are smis, so we can use an Immediate.
- __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- dest->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- dest->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
-      // Only one side is a constant 1-character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
-    // Inline the number comparison, handling any combination of smis and
-    // heap numbers, if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // on smis (not heap numbers), and the code for comparing smis is
-    // inlined separately. The same reason applies to for-loop conditions,
-    // which will also most likely be smi comparisons.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
- is_smi.Branch(both_smi);
-
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
-}
-
-
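The 1-character-constant path above boils down to a first-character compare plus a length check: an empty string sorts first, differing first characters decide, and on a tie the longer string sorts after. A minimal plain-C++ sketch of that ordering (hypothetical helper, not code from this patch):

#include <cstdint>
#include <string>

// Returns -1/0/1 for left <, ==, > the 1-character string containing 'right'.
static int CompareWithOneCharConstant(const std::string& left, uint8_t right) {
  if (left.empty()) return -1;        // length 0: less than any character
  uint8_t first = static_cast<uint8_t>(left[0]);
  if (first != right) return first < right ? -1 : 1;
  return left.size() == 1 ? 0 : 1;    // same first char: longer sorts after
}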
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
-    // Convert smi to double and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
- // Load left and right operand into registers xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- ¬_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- ¬_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
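GenerateInlineNumberComparison above leans on ucomisd setting the parity flag for unordered operands, i.e. when a NaN is involved. A hedged C++ sketch of the same bail-out structure (hypothetical helper name, for illustration only):

#include <cmath>

// Returns false when a NaN forces the stub path (the parity_even analogue);
// otherwise stores -1/0/1 into *result.
static bool TryInlineNumberCompare(double a, double b, int* result) {
  if (std::isnan(a) || std::isnan(b)) return false;
  *result = (a < b) ? -1 : ((a > b) ? 1 : 0);
  return true;
}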
-
-
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
-
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
-    // Result of x >>> y is always a smi if masked y >= 1, otherwise a number
-    // (e.g. -1 >>> 0 is 4294967295, which does not fit in a 32-bit smi).
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- // For all other operations only inline the Smi check code for likely smis
- // if the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
- }
- }
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
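For '+', the code above picks among three string fast paths before falling back to the generic stub. A hedged sketch of that dispatch (hypothetical enum and helper names, for illustration only):

// Which code path GenericBinaryOperation's ADD case selects.
enum AddPath {
  kStringAddStubNoChecks,  // both operands statically known to be strings
  kBuiltinStringAddLeft,   // only the left operand is known to be a string
  kBuiltinStringAddRight,  // only the right operand is known to be a string
  kGenericPath             // neither is known: smi/number/generic handling
};

static AddPath ChooseAddPath(bool left_is_string, bool right_is_string) {
  if (left_is_string) {
    return right_is_string ? kStringAddStubNoChecks : kBuiltinStringAddLeft;
  }
  return right_is_string ? kBuiltinStringAddRight : kGenericPath;
}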
// Emit a LoadIC call to get the value from receiver and leave it in
@@ -6901,623 +7563,155 @@
}
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ explicit DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key)
+ : dst_(dst), receiver_(receiver), key_(key) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
+};
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
+void DeferredReferenceGetKeyedValue::Generate() {
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rax)) {
+ __ movq(rax, key_);
+ } // else do nothing.
+ } else if (receiver_.is(rax)) {
+ if (key_.is(rdx)) {
+ __ xchg(rax, rdx);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
} else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
+ __ movq(rdx, receiver_);
+ __ movq(rax, key_);
}
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rax, key_);
+ __ movq(rdx, receiver_);
}
+  // Calculate the delta from the IC call instruction to the map check
+  // movq instruction in the inlined version. The negated delta is stored
+  // in a test(rax, -delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the movq instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ // TODO(X64): Consider whether it's worth switching the test to a
+ // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+ // be generated normally.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- }
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
-
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
-
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
-
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- // Check for negative or non-Smi left hand side.
- __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
+ if (!dst_.is(rax)) __ movq(dst_, rax);
}
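The negated-delta testl emitted above is what lets the IC initialization code walk back from the call site to the movq it needs to patch. A hypothetical sketch of that recovery, assuming the 0xA9 encoding of testl eax, imm32 (names invented for illustration; this is not code from this patch):

#include <cstdint>

static uint8_t* FindInlinedPatchSite(uint8_t* after_call) {
  // Anything other than 0xA9 ("testl eax, imm32") means this call site has
  // no inlined fast path to patch.
  if (*after_call != 0xA9) return 0;
  int32_t stored = *reinterpret_cast<int32_t*>(after_call + 1);
  return after_call + stored;  // stored == -delta_to_patch_site: walk back
}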
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
}
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
- }
-}
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both is in rax. Use a fresh non-rdx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Move value, receiver, and key to registers rax, rdx, and rcx, as
+ // the IC stub expects.
+ // Move value to rax, using xchg if the receiver or key is in rax.
+ if (!value_.is(rax)) {
+ if (!receiver_.is(rax) && !key_.is(rax)) {
+ __ movq(rax, value_);
} else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
-
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
+ __ xchg(rax, value_);
+ // Update receiver_ and key_ if they are affected by the swap.
+ if (receiver_.is(rax)) {
+ receiver_ = value_;
+ } else if (receiver_.is(value_)) {
+ receiver_ = rax;
+ }
+ if (key_.is(rax)) {
+ key_ = value_;
+ } else if (key_.is(value_)) {
+ key_ = rax;
}
}
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
+ }
+ // Value is now in rax. Its original location is remembered in value_,
+ // and the value is restored to value_ before returning.
+ // The variables receiver_ and key_ are not preserved.
+ // Move receiver and key to rdx and rcx, swapping if necessary.
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rcx)) {
+ __ movq(rcx, key_);
+ } // Else everything is already in the right place.
+ } else if (receiver_.is(rcx)) {
+ if (key_.is(rdx)) {
+ __ xchg(rcx, rdx);
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
} else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
+ __ movq(rdx, receiver_);
+ __ movq(rcx, key_);
}
- ASSERT(answer.is_valid());
- return answer;
+ } else if (key_.is(rcx)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rcx, key_);
+ __ movq(rdx, receiver_);
}
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
- }
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
-
- // We will modify right, it must be spilled.
- frame_->Spill(rcx);
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
-
- Label do_op;
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- __ movq(answer.reg(), left->reg());
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
-        // Branch if not a heap number.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpq(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
- } else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
- }
- __ bind(&do_op);
-
- // Perform the operation.
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
- break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- left->reg(),
- rcx,
- deferred->entry_label());
- break;
- }
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- left->reg(),
- rcx);
- break;
- }
- default:
- UNREACHABLE();
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
- }
-
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instructions (initial movq)
+ // to the test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC).
+ if (!value_.is(rax)) __ movq(value_, rax);
}
@@ -8143,2129 +8337,6 @@
}
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left + right) >= 0) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
-        // Perform an arithmetic shift of a negative number by complementing
-        // it, shifting logically, and complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object == Heap::undefined_value()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
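The MUL case above only folds when the product fits in a smi and is not the -0 that JavaScript mandates for a zero product with a negative factor. A distilled sketch of that guard, assuming the 32-bit smi payload asserted earlier (hypothetical helper name):

#include <cstdint>

static bool CanFoldSmiMul(int32_t left, int32_t right) {
  double product = static_cast<double>(left) * right;
  if (product < INT32_MIN || product > INT32_MAX) return false;
  // A zero product with a negative non-zero factor would be -0: not a smi.
  return product != 0 || (left + right) >= 0;
}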
-
-
-// End of CodeGenerator implementation.
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
-    // Two uint32_t's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
-}
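The cache lookup above hashes the double's raw bits down to an index. A hedged C++ rendering of the hash as commented (assumes a power-of-two cache size, which the ASSERT checks; CacheIndex is a hypothetical name):

#include <cstdint>

static int CacheIndex(uint64_t bits, uint32_t cache_size) {
  uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  h ^= h >> 16;
  h ^= h >> 8;
  return static_cast<int>(h & (cache_size - 1));
}

The stub then scales this index by the 16-byte element size (the addl rcx, rcx followed by the times_8 lea).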
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
- // Registers:
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
-  // If the argument is outside the range -2^63..2^63, fsin/fcos don't
-  // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
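A hedged libm-based sketch of what GenerateOperation computes (for illustration only: fprem1 produces an IEEE remainder, as std::remainder does, and the stub reduces only when |input| >= 2^63, where fsin/fcos would fail):

#include <cmath>

static const double kTwoPi = 6.283185307179586;  // 2*pi rounded to double

static double TranscendentalSketch(double input, bool is_sin) {
  if (std::isnan(input) || std::isinf(input)) return NAN;  // on_nan_result
  double reduced = std::remainder(input, kTwoPi);          // fprem1 loop
  return is_sin ? std::sin(reduced) : std::cos(reduced);
}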
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
-  // Double to remove the sign bit, shift the exponent down to the least
-  // significant bits, and subtract the bias to get the unshifted, unbiased
-  // exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
-  // If the exponent is negative or above 83, the number contains no
-  // significant bits in the range 0..2^31, so the result is zero, and the
-  // result register already holds zero.
- __ j(above, &done);
-
-  // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
-  // If the value was negative, this computes (bits - 1) ^ -1 == -bits;
-  // otherwise it computes (bits - 0) ^ 0, leaving the bits unchanged.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
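A portable sketch of IntegerConvert's three exponent ranges (plain C++; the name is hypothetical): direct truncation below 63, zero above 83, and in between a mantissa shift with two's-complement negation for negative inputs.

#include <cstdint>
#include <cstring>

static int32_t IntegerConvertSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits << 1) >> 53) - 1023;  // unbiased
  if (exponent < 63) {
    // cvttsd2siq territory: |value| < 2^63, truncate toward zero.
    return static_cast<int32_t>(static_cast<int64_t>(value));
  }
  if (exponent > 83) return 0;  // no significant bits below 2^31
  uint64_t mantissa =
      (bits & ((static_cast<uint64_t>(1) << 52) - 1)) |
      (static_cast<uint64_t>(1) << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  if (bits >> 63) low = ~low + 1;  // two's-complement negation when negative
  return static_cast<int32_t>(low);
}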
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
-
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
-
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
-
- // Either zero or Smi::kMinValue, neither of which become a smi when
- // negated.
- if (negative_zero_ == kStrictNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
- } else {
- __ jmp(&slow);
- }
-
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
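With kStrictNegativeZero, the smi fast path above bails to the runtime for exactly two inputs. A hedged one-function summary of the invariant, assuming the 32-bit smi payload (hypothetical name):

#include <cstdint>

// 0 must become -0 (a heap number), and negating INT32_MIN overflows the
// 32-bit smi payload; every other smi negates to a smi.
static bool NegationStaysSmi(int32_t v) {
  return v != 0 && v != INT32_MIN;
}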
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-  // Just jump directly to the runtime if native RegExp is not selected at
-  // compile time, or if entering RegExp generated code has been turned off
-  // by the runtime flag.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
-  // rsp[0]: return address
-  // rsp[8]: last_match_info (expected JSArray)
-  // rsp[16]: previous index
-  // rsp[24]: subject string
-  // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rcx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
- __ j(above, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
-  ASSERT(kExternalStringTag != 0);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
- __ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
-  // rax: first part of cons string.
-  // rbx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
-
- __ bind(&check_code);
-  // Check that the irregexp code has been generated for the actual string
-  // encoding. If it has, the field contains a code object; otherwise it
-  // contains the hole.
- __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
-
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
-  // rsi is caller-save on Windows and is used to pass a parameter on Linux.
- __ push(rsi);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track of aliasing between argX defined above and the registers used.
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
- __ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
- __ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
- __ bind(&setup_rest);
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 1: Subject string.
- __ movq(arg1, rax);
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
-
- // rsi is caller save, as it is used to pass a parameter.
- __ pop(rsi);
-
- // Check the result.
- Label success;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
- Label failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception, it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code, but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerun the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
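- // (The lea computes rax + rax + 2; e.g. 3 captures -> (3 + 1) * 2 = 8.)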
-
- // rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
-
- // Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in the string cache consists of two pointer-sized fields,
- // but the times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by the addressing modes on x64.
- // So we have to premultiply the entry index before the lookup.
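- // Sketch: byte offset = (hash & mask) << (kPointerSizeLog2 + 1), i.e.
- // entry_index * 16 with 8-byte pointers.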
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
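- // (The length counts two fields per entry, hence the shift right by one;
- // with a power-of-two entry count, mask = number_of_entries - 1.)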
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
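- // Sketch: hash(smi) = smi value; hash(double) = hi32(bits) ^ lo32(bits);
- // the result is then masked down to a cache entry index.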
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
-
- ASSERT_EQ(8, kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
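- // E.g. for cc == greater, returning LESS makes the comparison yield false.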
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- Label check_unequal_objects, done;
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, ¬_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
-
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, ¬_identical);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heap numbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
- }
-
- __ bind(¬_identical);
- }
-
- if (cc_ == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, ¬_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(¬_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rax);
- __ push(rdx);
- __ push(rcx);
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(2 * kPointerSize); // rax, rdx were pushed
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(2 * kPointerSize); // rax, rdx were pushed
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
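- // Sketch: with tags smi = 0 and heap object = 1,
- //   smi + heap object  -> low bit 1 (not both objects),
- //   heap + heap object -> low bit 0 (both objects).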
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, ¬_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, ¬_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, ¬_both_objects);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
- __ ret(2 * kPointerSize); // rax, rdx were pushed
- __ bind(¬_both_objects);
- }
-
- // must swap argument order
- __ pop(rcx);
- __ pop(rdx);
- __ pop(rax);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Returns a bitwise zero to indicate that the value
- // is an instance of the function, and anything else to
- // indicate that the value is not an instance.
-
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // rdx is function, rax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- ASSERT_EQ(0, kSmiTag);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used to compute the size for the allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that the stack contains the next handler, frame pointer, state and
- // return address, in that order.
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
- StackHandlerConstants::kStateOffset);
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // get next in chain
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ xor_(rsi, rsi); // tentatively set context pointer to NULL
- Label skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, Operand(rsp, 4 * kPointerSize));
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, Operand(rsp, 6 * kPointerSize));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, Operand(rsp, 4 * kPointerSize));
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
- }
-
- // Check for failure result.
- Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
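- // (Failure objects satisfy (ptr & kFailureTagMask) == kFailureTag, and the
- // ASSERT above guarantees ((kFailureTag + 1) & kFailureTagMask) == 0, so
- // adding 1 clears the low two bits exactly for failure values.)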
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_, result_size_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
-
- // Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by JavaScript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
- }
-
- // Clear the context pointer.
- __ xor_(rsi, rsi);
-
- // Restore registers from handler.
- ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
-
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
- StackHandlerConstants::kPCOffset);
- __ ret(0);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Go to the slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, result_size_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- //      a failure result if the do_gc argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- UNREACHABLE();
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
- // callee save as well.
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, ¬_outermost_js);
- __ movq(rax, rbp);
- __ store_rax(js_entry_sp);
- __ bind(¬_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If the current RBP value is the same as the js_entry_sp value, it means
- // that the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
- __ j(not_equal, ¬_outermost_js_2);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(¬_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of stubs.
-
-// Stub classes have public member named masm, not masm_.
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rdx.
- IntegerConvert(masm, rdx, rdx);
-
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
void GenericBinaryOpStub::GenerateCall(
MacroAssembler* masm,
Register left,
@@ -10962,12 +9033,1990 @@
}
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
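+ // Equivalent C sketch (bits being the raw 64-bit double representation):
+ //   uint32_t h = static_cast<uint32_t>(bits ^ (bits >> 32));
+ //   h = (h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24)) & (kCacheSize - 1);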
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint32_t's and a pointer per element.
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
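+ // (x64 addressing has no times_16 scale factor, so compute (rcx + rcx) * 8.)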
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If the argument is outside the range -2^63..2^63, fsin/fcos don't
+ // work. We must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
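+ // (63 + 1023 = 1086 is the biased exponent of 2^63; anything below that
+ // is in range.)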
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
+
+ // Use fprem1 to restrict the argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
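+ // (fadd(0) computes st(0) = st(0) + st(0), i.e. pi + pi = 2*pi.)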
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear the exceptions if the Invalid Operation or Zero Divide flags are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, the computation only has a partial result. Loop to
+ // continue the computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ // FPU Stack: input % 2*pi, 2*pi
+ __ fstp(0);
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
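+
+// A C sketch of the reduction above (names illustrative): fsin/fcos only
+// accept |x| < 2^63, so larger finite inputs are reduced modulo 2*pi with
+// fprem1, which must repeat while the FPU reports a partial result:
+//
+//   int exponent = (int)(bits >> 52) & 0x7ff;     // Biased, sign removed.
+//   if (exponent >= 63 + 1023) {
+//     if (exponent == 0x7ff) return NaN;          // Infinity or NaN.
+//     do { x = fprem1(x, 2 * pi); } while (fpu_c2_flag_set());
+//   }
+//   return (type == SIN) ? sin(x) : cos(x);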
+
+
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+ // Double the value to remove the sign bit, shift the exponent down to the
+ // least significant bits, and subtract the bias to get the unshifted,
+ // unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
+
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If exponent negative or above 83, number contains no significant bits in
+ // the range 0..2^31, so result is zero, and rcx already holds zero.
+ __ j(above, &done);
+
+ // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+ // If double_value is negative, do (double_value-1)^-1, otherwise
+ // (double_value-0)^0.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
+ } else {
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
+ }
+
+ __ bind(&done);
+}
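+
+// The 63..83 path above, as a C sketch (illustrative): only the low 32 bits
+// of (mantissa << (exponent - 52)) can be non-zero, and a negative input is
+// negated via (m - 1) ^ -1 (two's complement):
+//
+//   int exponent = (int)((bits << 1) >> 53) - 1023;  // Drop sign, unbias.
+//   if (exponent < 63) return (int32_t)(int64_t)value;  // cvttsd2siq path.
+//   if (exponent > 83) return 0;               // No bits in range 0..2^31.
+//   uint32_t m = (uint32_t)(bits << 1);        // Doubled low mantissa bits.
+//   int32_t sign = ((int64_t)bits < 0) ? -1 : 0;
+//   return ((m + sign) ^ sign) << (exponent - 52 - 1);  // -1: undo doubling.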
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ // Check float operands.
+ Label done;
+ Label rax_is_smi;
+ Label rax_is_object;
+ Label rdx_is_object;
+
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
+
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
+
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
+ __ SmiToInteger32(rcx, rax);
+
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in rdx.
+ IntegerConvert(masm, rdx, rdx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rcx, rax);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
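+
+// Contract of the conversion above, as a C sketch (IsSmi and friends are
+// illustrative helpers, not the V8 API):
+//
+//   int32_t LoadAsInt32(Value v, Label* failure) {
+//     if (IsSmi(v)) return SmiValue(v);
+//     if (IsHeapNumber(v)) return IntegerConvert(v);  // Truncating convert.
+//     if (IsUndefined(v)) return 0;       // ECMA-262, section 9.5.
+//     Jump(failure);                      // Anything else: conversion fails.
+//   }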
+
+
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ if (negative_zero_ == kIgnoreNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(equal, &done);
+ }
+
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+ // Either zero or Smi::kMinValue, neither of which become a smi when
+ // negated.
+ if (negative_zero_ == kStrictNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+ } else {
+ __ jmp(&slow);
+ }
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
+
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
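+
+// The SUB float path above negates by flipping the sign bit only; in C terms
+// (illustrative):
+//
+//   uint64_t bits;
+//   memcpy(&bits, &value, sizeof(bits));
+//   bits ^= (uint64_t)1 << 63;            // Also maps 0.0 <-> -0.0.
+//   memcpy(&value, &bits, sizeof(bits));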
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(rdx, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
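+
+// Net effect of the fast path above, as a C sketch (names illustrative):
+// arguments[key] is read directly off the caller's frame, and the unsigned
+// compare rejects negative keys together with too-large ones:
+//
+//   if (!IsSmi(key) || (uint32_t)key >= (uint32_t)count) return Slow();
+//   return frame_pointer[1 + count - key];  // Slots above the saved rbp.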
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
+ // Do not clobber the length index for the indexing operation since
+ // it is later used to compute the size for allocation.
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testl(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
+
+ // Copy the JS object part.
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
+
+ // Setup the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ SmiTest(rcx);
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
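+
+// Allocation size computed in try_allocate above, in plain C:
+//
+//   int size = Heap::kArgumentsObjectSize;
+//   if (argc > 0) size += FixedArray::kHeaderSize + argc * kPointerSize;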
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Jump directly to the runtime system if native RegExp is not selected at
+ // compile time, or if the regexp entry in generated code is turned off by a
+ // runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
+ // Check that the third argument is a positive smi less than the string
+ // length. A negative value will be greater (unsigned comparison).
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ andb(rbx, Immediate(
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ ASSERT(kExternalStringTag != 0);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask | kStringEncodingMask));
+ ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // rax: subject string (sequential ascii)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // rax: subject string (flat two-byte)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // Load used arguments before starting to push arguments for the call to
+ // native RegExp code, to avoid handling the changing stack height.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // rsi is callee-saved on Windows and used to pass a parameter on Linux.
+ __ push(rsi);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
+
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
+
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+ // Keep track of aliasing between argX defined above and the registers used.
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ j(zero, &setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
+
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
+
+ // Locate the code entry and call it.
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
+
+ // Restore rsi, which was saved above because it is used to pass a parameter.
+ __ pop(rsi);
+
+ // Check the result.
+ Label success;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception, it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ Cmp(kScratchRegister, Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
+
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+ // Capture register counter starts from the number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
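+
+// The capture register count, computed twice above via
+// lea(rdx, Operand(captures, captures, times_1, 2)), is simply:
+//
+//   int registers = (number_of_captures + 1) * 2;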
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
+ __ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+
+ ASSERT_EQ(8, kDoubleSize);
+ __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ Register probe = mask;
+ __ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
+ __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&is_smi);
+ __ SmiToInteger32(scratch, object);
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask) {
+ __ and_(hash, mask);
+ // Each entry in the number string cache consists of two pointer-sized
+ // fields, but the times_twice_pointer_size (multiply by 16) scale factor
+ // is not supported by the addressing mode on the x64 platform, so the
+ // entry index must be premultiplied before the lookup.
+ __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
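+
+// Taken together with the hash described above (see Heap::GetNumberStringCache),
+// the cache index is, as a sketch with illustrative helper names:
+//
+//   uint32_t hash = IsSmi(obj) ? SmiValue(obj)
+//                              : HighWord(double_bits) ^ LowWord(double_bits);
+//   int index = (hash & mask) << (kPointerSizeLog2 + 1);  // Scale by 16.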
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ movq(rbx, Operand(rsp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+ // The compare stub returns a positive, negative, or zero 64-bit integer
+ // value in rax, corresponding to result of comparing the two inputs.
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Two identical objects are equal unless they are both NaN or undefined.
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, ¬_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check_for_nan);
+ __ Set(rax, NegativeComparisonResult(cc_));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+ // If it's not a heap number, then return equal for the (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, ¬_identical);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
+ }
+
+ __ bind(¬_identical);
+ }
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ __ SelectNonSmi(rbx, rax, rdx, ¬_smis);
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(¬_smis);
+ }
+
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (eax (not rax) is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rax);
+ __ push(rdx);
+ __ push(rcx);
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax (not rax) already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(static_cast<int64_t>(1), kSmiTagMask);
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, ¬_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, ¬_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, ¬_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ bind(¬_both_objects);
+ }
+
+ // Must swap the argument order.
+ __ pop(rcx);
+ __ pop(rdx);
+ __ pop(rax);
+ __ push(rdx);
+ __ push(rax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
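+
+// The smi test used for the undetectable-object check above relies on tag
+// arithmetic: smis have low bit 0 and heap object pointers have low bit 1
+// (kHeapObjectTag), so in C terms:
+//
+//   (((uintptr_t)a + (uintptr_t)b) & kSmiTagMask) != 0
+//   // iff exactly one of a and b is a smi.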
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack, so we
+ // temporarily store the return address in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+ // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // Check that the stack contains the next handler, frame pointer, state and
+ // return address, in that order.
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
+
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ // Get the next handler in the chain.
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ UNREACHABLE();
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // rax: result parameter for PerformGC, if any.
+ // rbx: pointer to C function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
+ // This pointer is reused in LeaveExitFrame(), so it is stored in a
+ // callee-saved register.
+
+ // Simple results are returned in rax (both AMD64 and Win64 calling
+ // conventions). Complex results must be written to the address passed as
+ // the first argument on Win64; the AMD64 calling convention instead
+ // returns a struct of two pointers in rax and rdx.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+ // stack is known to be aligned. This function takes one argument which is
+ // passed in a register.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // _WIN64
+ __ movq(rdi, rax);
+#endif
+ __ movq(kScratchRegister,
+ FUNCTION_ADDR(Runtime::PerformGC),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(kScratchRegister);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ incl(Operand(kScratchRegister, 0));
+ }
+
+ // Call C function.
+#ifdef _WIN64
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
+ // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+ __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
+ if (result_size_ < 2) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax).
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // Pass a pointer to the result location as the first argument.
+ __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+ }
+
+#else // _WIN64
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+ __ movq(rdi, r14); // argc.
+ __ movq(rsi, r12); // argv.
+#endif
+ __ call(rbx);
+ // Result is in rax - do not destroy this register!
+
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ decl(Operand(kScratchRegister, 0));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+ // If return value is on the stack, pop it to registers.
+ if (result_size_ > 1) {
+ ASSERT_EQ(2, result_size_);
+ // Read result values stored on stack. Result is stored
+ // above the four argument mirror slots and the two
+ // Arguments object slots.
+ __ movq(rax, Operand(rsp, 6 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ }
+#endif
+ __ lea(rcx, Operand(rax, 1));
+ // Lower 2 bits of rcx are 0 iff rax has failure tag.
+ __ testl(rcx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame(mode_, result_size_);
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+ // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry);
+
+ // Special handling of out of memory exceptions.
+ __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ movq(rax, Operand(kScratchRegister, 0));
+ __ movq(rdx, ExternalReference::the_hole_value_location());
+ __ movq(rdx, Operand(rdx, 0));
+ __ movq(Operand(kScratchRegister, 0), rdx);
+
+ // Special handling of termination exceptions, which are uncatchable
+ // by JavaScript code.
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
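+
+// The failure checks above, as a C sketch: failure objects have all failure
+// tag bits set, so adding one clears them, and RETRY_AFTER_GC is type zero
+// (the type test only runs once a failure has been detected):
+//
+//   bool is_failure = (((uintptr_t)rax + 1) & kFailureTagMask) == 0;
+//   bool is_retry = (((uintptr_t)rax >> kFailureTagSize) &
+//                    ((1 << kFailureTypeTagSize) - 1)) == 0;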
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ movq(rsp, Operand(rsp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ movq(kScratchRegister, handler_address);
+ __ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ movq(rax, Immediate(false));
+ __ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ store_rax(pending_exception);
+ }
+
+ // Clear the context pointer.
+ __ xor_(rsi, rsi);
+
+ // Restore registers from handler.
+ ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ StackHandlerConstants::kFPOffset);
+ __ pop(rbp); // FP
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ __ pop(rdx); // State
+
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
+ __ ret(0);
+}
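+
+// Unwinding above, sketched as a C loop over the on-stack handler chain
+// (field names follow StackHandlerConstants; the types are illustrative):
+//
+//   StackHandler* handler = *handler_address;
+//   while (handler->state != StackHandler::ENTRY) handler = handler->next;
+//   *handler_address = handler->next;     // Pop past the ENTRY handler.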
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(mode_, result_size_);
+
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the collect_garbage argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: argv pointer (C callee-saved).
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
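+
+// The three GenerateCore calls implement this retry protocol (sketch):
+//
+//   result = builtin(...);                       // 1st attempt: no GC.
+//   if (IsRetryAfterGC(result)) {
+//     CollectGarbage(result);                    // 2nd: space-specific GC.
+//     result = builtin(...);
+//   }
+//   if (IsRetryAfterGC(result)) {
+//     CollectAllGarbage();                       // Last: full GC with the
+//     result = builtin(...);                     // always-allocate scope.
+//   }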
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Scratch register is neither callee-save, nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
+ __ push(rbx);
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
+
+  // Save a copy of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, ¬_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(¬_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+  // Invoke the function by calling through the JS entry trampoline
+  // builtin, and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current RBP value is the same as the js_entry_sp value, the
+  // current function is the outermost one.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, ¬_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(¬_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+#ifdef _WIN64
+  // Callee-saved in Win64 ABI; arguments/volatile in AMD64 ABI.
+ __ pop(rsi);
+ __ pop(rdi);
+#endif
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
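
Taken together, the pushes above give the entry frame the following layout.
A hedged summary (8-byte words, offsets from the new rbp; these are not
official V8 constants):

// Hedged sketch of the entry-frame layout built by GenerateBody above.
// On Win64, rdi/rsi occupy -56/-64 and shift the last two slots down.
enum EntryFrameOffset {
  kSavedCallerFP  =   0,  // pushed rbp
  kContextMarker  =  -8,  // ENTRY/ENTRY_CONSTRUCT smi marker (context slot)
  kFunctionMarker = -16,  // same marker pushed again (function slot)
  kSavedR12 = -24, kSavedR13 = -32, kSavedR14 = -40, kSavedR15 = -48,
  kSavedRBX = -56,        // -72 on Win64
  kCEntryFPCopy = -64     // -80 on Win64; copy of Top::k_c_entry_fp_address
};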
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+  // Returns bitwise zero to indicate that the value is an instance
+  // of the function, and anything else to indicate that it is not.
+
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
+
+  // Check that the left-hand side is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ ASSERT_EQ(0, kSmiTag);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
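
In JavaScript terms, the stub above is a fast path that walks the value's
prototype chain comparing each link against the function's prototype, with a
one-entry (function, map, answer) cache kept in the root array. A hedged C++
rendering of just the walk, using a stand-in object model:

// Hedged sketch; Obj is a stand-in, not V8's object model.
struct Obj { Obj* prototype; };

// Mirrors the stub's convention: bitwise zero means "is an instance",
// any non-zero value means "is not".
unsigned InstanceOfSketch(Obj* value, Obj* fn_prototype) {
  for (Obj* p = value->prototype; p != nullptr; p = p->prototype) {
    if (p == fn_prototype) return 0;
  }
  return 1;  // the stub returns a non-zero GCable value here
}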
+
+
int CompareStub::MinorKey() {
  // Encode the three parameters in a unique 16-bit value. To avoid duplicate
  // stubs, the never-NaN-NaN condition is only taken into account when the
  // condition is equal.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
| StrictField::encode(strict_)
| NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
| IncludeNumberCompareField::encode(include_number_compare_);
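
MinorKey packs the stub's parameters into disjoint bit fields so that every
distinct configuration yields a distinct stub key. A hedged sketch of the
packing; the field positions are illustrative, with only the 12-bit condition
width taken from the ASSERT above:

#include <cstdint>

// Hedged sketch of the bit-field packing, not V8's exact field layout.
uint32_t MinorKeySketch(unsigned cc, bool registers_used, bool strict,
                        bool never_nan_nan, bool include_number_compare) {
  return (cc & 0xFFFu)                           // condition: 12 bits
      | (registers_used         ? 1u << 12 : 0u)
      | (strict                 ? 1u << 13 : 0u)
      | (never_nan_nan          ? 1u << 14 : 0u)
      | (include_number_compare ? 1u << 15 : 0u);
}
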
@@ -10977,6 +11026,8 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
@@ -11494,7 +11545,7 @@
// Make count the number of bytes to copy.
if (!ascii) {
- ASSERT_EQ(2, sizeof(uc16)); // NOLINT
+ ASSERT_EQ(2, static_cast<int>(sizeof(uc16))); // NOLINT
__ addl(count, count);
}
@@ -12067,6 +12118,11 @@
#undef __
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ masm->RecordWriteHelper(object_, addr_, scratch_);
+ masm->ret(0);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index b9a3b70..dc6f583 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -521,6 +521,17 @@
Condition cc,
bool strict,
ControlDestination* destination);
+
+ // If at least one of the sides is a constant smi, generate optimized code.
+ void ConstantSmiComparison(Condition cc,
+ bool strict,
+ ControlDestination* destination,
+ Result* left_side,
+ Result* right_side,
+ bool left_side_constant_smi,
+ bool right_side_constant_smi,
+ bool is_loop_condition);
+
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
@@ -578,6 +589,7 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index c6be503..0b3b7c4 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1991,6 +1991,25 @@
}
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(above_equal, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
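
The emitted checks amount to a simple predicate: the value is not a smi and
its instance type is at or above FIRST_JS_OBJECT_TYPE. A hedged C++
paraphrase with stand-in types (the constant's value here is made up):

// Hedged paraphrase of the checks emitted above; not V8's API.
struct HeapObj { int instance_type; };
const int kFirstJSObjectType = 0x9f;  // illustrative value only

bool IsSpecObjectSketch(bool is_smi, const HeapObj* obj) {
  if (is_smi) return false;                         // JumpIfSmi -> if_false
  return obj->instance_type >= kFirstJSObjectType;  // above_equal -> if_true
}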
+
+
void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2243,11 +2262,8 @@
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- // To allocate a heap number, and ensure that it is not a smi, we
- // call the runtime function FUnaryMinus on 0, returning the double
- // -0.0. A new, distinct heap number is returned each time.
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
__ movq(rbx, rax);
__ bind(&heapnumber_allocated);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 76200d7..a5634a7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2322,101 +2322,6 @@
}
-Register MacroAssembler::CheckMaps(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // Get the map of the current object.
- movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- Cmp(scratch, Handle<Map>(object->map()));
- // Branch on the result of the map check.
- j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
-
- } else {
- // Check the map of the current object.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- // Branch on the result of the map check.
- j(not_equal, miss);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- Move(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- j(not_equal, miss);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index a256ab8..64f35e1 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -596,24 +596,6 @@
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [rsp + kPointerSize].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register and kScratchRegister,
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 383399e..8031864 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -960,7 +960,6 @@
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
- NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index ab75b96..2a918f1 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -81,6 +81,106 @@
}
+// Helper function used to check that a dictionary does not contain a
+// property. It may produce false negatives, so the code at miss_label
+// must always fall back to a complete backup property check.
+// This function is safe to call if the receiver has fast properties.
+// The name must be a symbol and the receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register r1) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+
+ Label done;
+ __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ testb(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = r0;
+ __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, miss_label);
+
+  // Offset of the dictionary's capacity field; the probe mask is derived
+  // from the capacity inside the loop below.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+  // If the names in slots 1 to kProbes - 1 for the hash value differ from
+  // the given name, and the kProbes-th slot is unused (its name is the
+  // undefined value), the hash table is guaranteed not to contain the
+  // property. This holds even if some slots hold deleted properties
+  // (their names are the null value).
+ for (int i = 0; i < kProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r1;
+ // Capacity is smi 2^n.
+ __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+ __ decl(index);
+ __ and_(index,
+ Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = r1;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ movq(entity_name, Operand(properties, index, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ Cmp(entity_name, Factory::undefined_value());
+ if (i != kProbes - 1) {
+ __ j(equal, &done);
+
+      // Stop if we found the property.
+ __ Cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss_label);
+
+ // Check if the entry name is not a symbol.
+ __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, miss_label);
+ } else {
+      // Give up probing if the undefined value still has not been found.
+ __ j(not_equal, miss_label);
+ }
+ }
+
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+}
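
The unrolled loop probes the dictionary the same way the runtime hash table
does: the capacity is a power of two, the probe sequence is quadratic, and
each entry spans StringDictionary::kEntrySize == 3 words. A hedged sketch of
the per-iteration index computation, following the in-line comment
"(hash + i + i * i) & mask" (the exact GetProbeOffset formula is assumed):

#include <cstdint>

// Hedged sketch of one probe's masked, scaled entry index.
uint32_t ProbeEntryIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
  uint32_t mask = capacity - 1;              // capacity is a power of two
  uint32_t slot = (hash + i + i * i) & mask;
  return slot * 3;                           // kEntrySize == 3 words per entry
}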
+
+
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
@@ -497,6 +597,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -512,6 +613,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
holder,
lookup,
name,
@@ -523,6 +625,7 @@
receiver,
scratch1,
scratch2,
+ scratch3,
name,
holder,
miss);
@@ -535,6 +638,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
@@ -574,7 +678,7 @@
Register holder =
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
- scratch2, name, depth1, miss);
+ scratch2, scratch3, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -590,7 +694,7 @@
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ scratch2, scratch3, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -626,12 +730,13 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
+ scratch1, scratch2, scratch3, name,
miss_label);
__ EnterInternalFrame();
@@ -784,7 +889,7 @@
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, name, depth, &miss);
+ rbx, rax, rdi, name, depth, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -807,7 +912,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
@@ -826,7 +931,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
}
@@ -847,7 +952,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
- rbx, rdx, name, &miss);
+ rbx, rdx, rdi, name, &miss);
}
break;
}
@@ -902,7 +1007,8 @@
__ JumpIfSmi(rdx, &miss);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
+ Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
+ name, &miss);
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
@@ -965,6 +1071,7 @@
holder,
rbx,
rax,
+ rdi,
name,
&miss);
@@ -1119,7 +1226,7 @@
CheckPrototypes(JSObject::cast(object), rdx,
holder, rbx,
- rax, name, &miss);
+ rax, rdi, name, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1226,6 +1333,7 @@
rdx,
rbx,
rdi,
+ rax,
&miss);
// Restore receiver.
@@ -1288,7 +1396,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
+ CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, &miss);
// Get the value from the cell.
__ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
@@ -1353,7 +1461,7 @@
Label miss;
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, rdi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1376,7 +1484,7 @@
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
+ GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1401,7 +1509,7 @@
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
- CheckPrototypes(object, rax, last, rbx, rdx, name, &miss);
+ CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1438,7 +1546,7 @@
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
+ GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1469,6 +1577,7 @@
rcx,
rdx,
rbx,
+ rdi,
name,
&miss);
@@ -1500,7 +1609,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
+ CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
// Get the value from the cell.
__ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
@@ -1546,7 +1655,7 @@
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx,
+ bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1600,7 +1709,7 @@
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadConstant(receiver, holder, rdx, rbx, rcx,
+ GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1660,6 +1769,7 @@
rax,
rcx,
rbx,
+ rdi,
name,
&miss);
__ bind(&miss);
@@ -1875,7 +1985,7 @@
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss);
+ GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -1954,6 +2064,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
ASSERT(interceptor_holder->HasNamedInterceptor());
@@ -1981,7 +2092,8 @@
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
// Save necessary data before invoking an interceptor.
@@ -2029,6 +2141,7 @@
lookup->holder(),
scratch1,
scratch2,
+ scratch3,
name,
miss);
}
@@ -2068,7 +2181,8 @@
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3,
+ name, miss);
__ pop(scratch2); // save old return address
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);
@@ -2087,6 +2201,7 @@
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
@@ -2097,7 +2212,7 @@
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
@@ -2122,41 +2237,143 @@
Register object_reg,
JSObject* holder,
Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
String* name,
int save_at_depth,
- Label* miss,
- Register extra) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object,
- object_reg,
- holder,
- holder_reg,
- scratch,
- save_at_depth,
- miss);
+ Label* miss) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg. On the first
+ // iteration, reg is an alias for object_reg, on later iterations,
+ // it is an alias for holder_reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), object_reg);
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // from now the object is in holder_reg
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else if (Heap::InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ Cmp(scratch1, Handle<Map>(current->map()));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+
+ // Restore scratch register to be the map of the object.
+ // We load the prototype from the map in the scratch register.
+ __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+
+ } else {
+ // Check the map of the current object.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current->map()));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ Move(reg, Handle<JSObject>(prototype));
+ }
+
+ if (save_at_depth == depth) {
+ __ movq(Operand(rsp, kPointerSize), reg);
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
+ __ j(not_equal, miss);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Perform security check for access to the global object and return
+ // the holder register.
+ ASSERT(current == holder);
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
+ current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
+ GlobalObject::cast(current),
name,
- scratch,
+ scratch1,
miss);
if (cell->IsFailure()) {
set_failure(Failure::cast(cell));
- return result;
+ return reg;
}
}
- object = JSObject::cast(object->GetPrototype());
+ current = JSObject::cast(current->GetPrototype());
}
// Return the register containing the holder.
- return result;
+ return reg;
}
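
Stripped of code generation, the rewritten walk makes a per-link decision:
objects with slow (dictionary) properties get a negative lookup instead of a
map check, while fast-property objects get a map check with the prototype
either loaded from the map (new space) or embedded directly (old space).
A hedged control-flow sketch with stand-in types and helpers:

// Hedged sketch; the types and helpers are stand-ins, not V8's API.
struct JSObj { JSObj* prototype; bool slow_properties; bool is_global; };

void CheckMapSketch(const JSObj*) {}                  // emitted map compare
void DictionaryNegativeLookupSketch(const JSObj*) {}  // proves name absence

void WalkChainSketch(JSObj* object, JSObj* holder) {
  for (JSObj* cur = object; cur != holder; cur = cur->prototype) {
    if (cur->slow_properties && !cur->is_global) {
      DictionaryNegativeLookupSketch(cur);  // no map check; prototype via map
    } else {
      CheckMapSketch(cur);  // prototype via map if in new space, else embedded
    }
  }
  CheckMapSketch(holder);   // final holder map check
}
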
@@ -2165,6 +2382,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -2174,7 +2392,7 @@
// Check the prototype chain.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -2187,6 +2405,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -2196,7 +2415,7 @@
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ Move(rax, Handle<Object>(value));
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 330ca5b..bd6108c 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -470,7 +470,10 @@
i::Heap::CollectGarbage(0, i::NEW_SPACE);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
- Local<String> small_string = String::New(AsciiToTwoByteString("small"));
+ uint16_t* two_byte_string = AsciiToTwoByteString("small");
+ Local<String> small_string = String::New(two_byte_string);
+ i::DeleteArray(two_byte_string);
+
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
@@ -479,7 +482,10 @@
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
- small_string = String::New(AsciiToTwoByteString("small 2"));
+ two_byte_string = AsciiToTwoByteString("small 2");
+ small_string = String::New(two_byte_string);
+ i::DeleteArray(two_byte_string);
+
// We should refuse externalizing newly created small string.
CHECK(!small_string->CanMakeExternal());
for (int i = 0; i < 100; i++) {
@@ -492,8 +498,11 @@
char* buf = i::NewArray<char>(buf_size);
memset(buf, 'a', buf_size);
buf[buf_size - 1] = '\0';
- Local<String> large_string = String::New(AsciiToTwoByteString(buf));
+
+ two_byte_string = AsciiToTwoByteString(buf);
+ Local<String> large_string = String::New(two_byte_string);
i::DeleteArray(buf);
+ i::DeleteArray(two_byte_string);
// Large strings should be immediately accepted.
CHECK(large_string->CanMakeExternal());
}
@@ -688,7 +697,11 @@
const char* two_byte_string_2 = "a_times_two_plus_b(4, 8) + ";
const char* two_byte_extern_2 = "a_times_two_plus_b(1, 2);";
Local<String> left = v8_str(one_byte_string_1);
- Local<String> right = String::New(AsciiToTwoByteString(two_byte_string_1));
+
+ uint16_t* two_byte_source = AsciiToTwoByteString(two_byte_string_1);
+ Local<String> right = String::New(two_byte_source);
+ i::DeleteArray(two_byte_source);
+
Local<String> source = String::Concat(left, right);
right = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_extern_1)));
@@ -698,7 +711,11 @@
source = String::Concat(source, right);
right = v8_str(one_byte_string_2);
source = String::Concat(source, right);
- right = String::New(AsciiToTwoByteString(two_byte_string_2));
+
+ two_byte_source = AsciiToTwoByteString(two_byte_string_2);
+ right = String::New(two_byte_source);
+ i::DeleteArray(two_byte_source);
+
source = String::Concat(source, right);
right = String::NewExternal(
new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
@@ -3821,9 +3838,10 @@
THREADED_TEST(WeakReference) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> templ= v8::ObjectTemplate::New();
+ Whammy* whammy = new Whammy();
templ->SetNamedPropertyHandler(WhammyPropertyGetter,
0, 0, 0, 0,
- v8::External::New(new Whammy()));
+ v8::External::New(whammy));
const char* extension_list[] = { "v8/gc" };
v8::ExtensionConfiguration extensions(1, extension_list);
v8::Persistent<Context> context = Context::New(&extensions);
@@ -3842,7 +3860,7 @@
"4";
v8::Handle<Value> result = CompileRun(code);
CHECK_EQ(4.0, result->NumberValue());
-
+ delete whammy;
context.Dispose();
}
@@ -8612,20 +8630,31 @@
v8::HandleScope scope;
const char* cstring = "function foo(a) { return a+1; }";
+
v8::ScriptData* sd_from_cstring =
v8::ScriptData::PreCompile(cstring, i::StrLength(cstring));
TestAsciiResource* resource = new TestAsciiResource(cstring);
- v8::ScriptData* sd_from_istring = v8::ScriptData::PreCompile(
+ v8::ScriptData* sd_from_external_string = v8::ScriptData::PreCompile(
v8::String::NewExternal(resource));
- CHECK_EQ(sd_from_cstring->Length(), sd_from_istring->Length());
+ v8::ScriptData* sd_from_string = v8::ScriptData::PreCompile(
+ v8::String::New(cstring));
+
+ CHECK_EQ(sd_from_cstring->Length(), sd_from_external_string->Length());
CHECK_EQ(0, memcmp(sd_from_cstring->Data(),
- sd_from_istring->Data(),
+ sd_from_external_string->Data(),
sd_from_cstring->Length()));
+ CHECK_EQ(sd_from_cstring->Length(), sd_from_string->Length());
+ CHECK_EQ(0, memcmp(sd_from_cstring->Data(),
+ sd_from_string->Data(),
+ sd_from_cstring->Length()));
+
+
delete sd_from_cstring;
- delete sd_from_istring;
+ delete sd_from_external_string;
+ delete sd_from_string;
}
@@ -9049,6 +9078,7 @@
CHECK_EQ(String::New(expected_slice_on_cons),
env->Global()->Get(v8_str("slice_on_cons")));
}
+ i::DeleteArray(two_byte_string);
}
@@ -9073,6 +9103,7 @@
i::StrLength(ascii_sources[i])));
v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource);
v8::Script::Compile(source);
+ i::DeleteArray(two_byte_string);
}
}
@@ -10350,6 +10381,40 @@
}
+static void StackTraceForUncaughtExceptionListener(
+ v8::Handle<v8::Message> message,
+ v8::Handle<Value>) {
+ v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK_EQ(2, stack_trace->GetFrameCount());
+ checkStackFrame("origin", "foo", 2, 3, false, false,
+ stack_trace->GetFrame(0));
+ checkStackFrame("origin", "bar", 5, 3, false, false,
+ stack_trace->GetFrame(1));
+}
+
+TEST(CaptureStackTraceForUncaughtException) {
+ report_count = 0;
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+
+ Script::Compile(v8_str("function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};"),
+ v8_str("origin"))->Run();
+ v8::Local<v8::Object> global = env->Global();
+ Local<Value> trouble = global->Get(v8_str("bar"));
+ CHECK(trouble->IsFunction());
+ Function::Cast(*trouble)->Call(global, 0, NULL);
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+ v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+}
+
+
// Test that idle notification can be handled and eventually returns true.
THREADED_TEST(IdleNotification) {
bool rv = false;
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 3058c6f..5e49c0c 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -70,7 +70,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -107,7 +106,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -153,7 +151,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -201,7 +198,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -261,7 +257,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -301,7 +296,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index e499c6f..b60865d 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -70,7 +70,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -108,7 +107,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -150,7 +148,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -185,7 +182,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
// don't print the code - our disassembler can't handle cvttss2si
@@ -220,7 +216,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
// don't print the code - our disassembler can't handle cvttsd2si
@@ -250,7 +245,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -288,7 +282,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
#ifdef DEBUG
@@ -329,7 +322,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
CHECK(code->IsCode());
@@ -385,7 +377,6 @@
assm.GetCode(&desc);
Code* code =
Code::cast(Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value())));
CHECK(code->IsCode());
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 8ebf752..6a94bed 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -6650,4 +6650,69 @@
CheckDebuggerUnloaded();
}
+
+static void* expected_break_data;
+static bool was_debug_break_called;
+static bool was_debug_event_called;
+static void DebugEventBreakDataChecker(const v8::Debug::EventDetails& details) {
+ if (details.GetEvent() == v8::BreakForCommand) {
+ CHECK_EQ(expected_break_data, details.GetClientData());
+ was_debug_event_called = true;
+ } else if (details.GetEvent() == v8::Break) {
+ was_debug_break_called = true;
+ }
+}
+
+// Check that BreakForCommand event details carry the client data that was
+// passed to DebugBreakForCommand when the debug event occurred.
+TEST(DebugEventBreakData) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener2(DebugEventBreakDataChecker);
+
+ TestClientData::constructor_call_counter = 0;
+ TestClientData::destructor_call_counter = 0;
+
+ expected_break_data = NULL;
+ was_debug_event_called = false;
+ was_debug_break_called = false;
+ v8::Debug::DebugBreakForCommand();
+ v8::Script::Compile(v8::String::New("(function(x){return x;})(1);"))->Run();
+ CHECK(was_debug_event_called);
+ CHECK(!was_debug_break_called);
+
+ TestClientData* data1 = new TestClientData();
+ expected_break_data = data1;
+ was_debug_event_called = false;
+ was_debug_break_called = false;
+ v8::Debug::DebugBreakForCommand(data1);
+ v8::Script::Compile(v8::String::New("(function(x){return x+1;})(1);"))->Run();
+ CHECK(was_debug_event_called);
+ CHECK(!was_debug_break_called);
+
+ expected_break_data = NULL;
+ was_debug_event_called = false;
+ was_debug_break_called = false;
+ v8::Debug::DebugBreak();
+ v8::Script::Compile(v8::String::New("(function(x){return x+2;})(1);"))->Run();
+ CHECK(!was_debug_event_called);
+ CHECK(was_debug_break_called);
+
+ TestClientData* data2 = new TestClientData();
+ expected_break_data = data2;
+ was_debug_event_called = false;
+ was_debug_break_called = false;
+ v8::Debug::DebugBreak();
+ v8::Debug::DebugBreakForCommand(data2);
+ v8::Script::Compile(v8::String::New("(function(x){return x+3;})(1);"))->Run();
+ CHECK(was_debug_event_called);
+ CHECK(was_debug_break_called);
+
+ CHECK_EQ(2, TestClientData::constructor_call_counter);
+ CHECK_EQ(TestClientData::constructor_call_counter,
+ TestClientData::destructor_call_counter);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index f890fc1..2bb32e7 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -437,6 +437,11 @@
"eeb10bc0 vsqrt.f64 d0, d0");
COMPARE(vsqrt(d2, d3, ne),
"1eb12bc3 vsqrt.f64ne d2, d3");
+
+ COMPARE(vmov(d0, 1.0),
+ "eeb70b00 vmov.f64 d0, #1");
+ COMPARE(vmov(d2, -13.0),
+ "eeba2b0a vmov.f64 d2, #-13");
}
VERIFY_RUN();
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index e51bfab..40fadd8 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -415,7 +415,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 7f1e3d8..1819aa4 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -56,8 +56,7 @@
TEST(ConstructorProfile) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
CompileAndRunScript(
"function F() {} // A constructor\n"
@@ -144,8 +143,7 @@
TEST(ClustersCoarserSimple) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@@ -183,8 +181,7 @@
TEST(ClustersCoarserMultipleConstructors) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@@ -214,8 +211,7 @@
TEST(ClustersCoarserPathsTraversal) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@@ -267,8 +263,7 @@
TEST(ClustersCoarserSelf) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
@@ -362,8 +357,7 @@
TEST(RetainerProfile) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
CompileAndRunScript(
"function A() {}\n"
@@ -431,8 +425,8 @@
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
- CHECK_EQ(1, snapshot->GetHead()->GetChildrenCount());
- return snapshot->GetHead()->GetChild(0)->GetToNode();
+ CHECK_EQ(1, snapshot->GetRoot()->GetChildrenCount());
+ return snapshot->GetRoot()->GetChild(0)->GetToNode();
}
@@ -449,6 +443,19 @@
}
+static bool IsNodeRetainedAs(const v8::HeapGraphNode* node,
+ v8::HeapGraphEdge::Type type,
+ const char* name) {
+ for (int i = 0, count = node->GetRetainersCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetRetainer(i);
+ v8::String::AsciiValue prop_name(prop->GetName());
+ if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
+ return true;
+ }
+ return false;
+}
+
+
static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = node->GetChild(i);
@@ -464,11 +471,9 @@
TEST(HeapSnapshot) {
v8::HandleScope scope;
-
v8::Handle<v8::String> token1 = v8::String::New("token1");
- v8::Handle<v8::Context> env1 = v8::Context::New();
+ LocalContext env1;
env1->SetSecurityToken(token1);
- env1->Enter();
CompileAndRunScript(
"function A1() {}\n"
@@ -479,9 +484,8 @@
"var c1 = new C1(a1);");
v8::Handle<v8::String> token2 = v8::String::New("token2");
- v8::Handle<v8::Context> env2 = v8::Context::New();
+ LocalContext env2;
env2->SetSecurityToken(token2);
- env2->Enter();
CompileAndRunScript(
"function A2() {}\n"
@@ -569,8 +573,7 @@
TEST(HeapSnapshotCodeObjects) {
v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
+ LocalContext env;
CompileAndRunScript(
"function lazy(x) { return x - 1; }\n"
@@ -598,12 +601,13 @@
CHECK_NE(NULL, lazy_code);
// Verify that non-compiled code doesn't contain references to "x"
- // literal, while compiled code does.
+ // literal, while compiled code does. The scope info is stored in FixedArray
+ // objects attached to the SharedFunctionInfo.
bool compiled_references_x = false, lazy_references_x = false;
for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (node->GetType() == v8::HeapGraphNode::ARRAY) {
if (HasString(node, "x")) {
compiled_references_x = true;
break;
@@ -613,7 +617,7 @@
for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (node->GetType() == v8::HeapGraphNode::ARRAY) {
if (HasString(node, "x")) {
lazy_references_x = true;
break;
@@ -624,4 +628,132 @@
CHECK(!lazy_references_x);
}
+
+// Trying to introduce a check helper for uint64_t causes many
+// overloading ambiguities, so it seems easier just to cast
+// them to a signed type.
+#define CHECK_EQ_UINT64_T(a, b) \
+ CHECK_EQ(static_cast<int64_t>(a), static_cast<int64_t>(b))
+#define CHECK_NE_UINT64_T(a, b) do \
+ { \
+ bool ne = a != b; \
+ CHECK(ne); \
+ } while (false)
+
+TEST(HeapEntryIdsAndGC) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileAndRunScript(
+ "function A() {}\n"
+ "function B(x) { this.x = x; }\n"
+ "var a = new A();\n"
+ "var b = new B(a);");
+ const v8::HeapSnapshot* snapshot1 =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
+
+ i::Heap::CollectAllGarbage(true); // Enforce compaction.
+
+ const v8::HeapSnapshot* snapshot2 =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
+
+ const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
+ const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
+ CHECK_NE_UINT64_T(0, global1->GetId());
+ CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
+ const v8::HeapGraphNode* A1 =
+ GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "A");
+ const v8::HeapGraphNode* A2 =
+ GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "A");
+ CHECK_NE_UINT64_T(0, A1->GetId());
+ CHECK_EQ_UINT64_T(A1->GetId(), A2->GetId());
+ const v8::HeapGraphNode* B1 =
+ GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "B");
+ const v8::HeapGraphNode* B2 =
+ GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "B");
+ CHECK_NE_UINT64_T(0, B1->GetId());
+ CHECK_EQ_UINT64_T(B1->GetId(), B2->GetId());
+ const v8::HeapGraphNode* a1 =
+ GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "a");
+ const v8::HeapGraphNode* a2 =
+ GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "a");
+ CHECK_NE_UINT64_T(0, a1->GetId());
+ CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
+ const v8::HeapGraphNode* b1 =
+ GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "b");
+ const v8::HeapGraphNode* b2 =
+ GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "b");
+ CHECK_NE_UINT64_T(0, b1->GetId());
+ CHECK_EQ_UINT64_T(b1->GetId(), b2->GetId());
+}
+
+
+TEST(HeapSnapshotsDiff) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileAndRunScript(
+ "function A() {}\n"
+ "function B(x) { this.x = x; }\n"
+ "var a = new A();\n"
+ "var b = new B(a);");
+ const v8::HeapSnapshot* snapshot1 =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
+
+ CompileAndRunScript(
+ "delete a;\n"
+ "b.x = null;\n"
+ "var a = new A();\n"
+ "var b2 = new B(a);");
+ const v8::HeapSnapshot* snapshot2 =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
+
+ const v8::HeapSnapshotsDiff* diff = snapshot1->CompareWith(snapshot2);
+
+ // Verify additions: ensure that addition of A and B was detected.
+ const v8::HeapGraphNode* additions_root = diff->GetAdditionsRoot();
+ bool found_A = false, found_B = false;
+ uint64_t s1_A_id = 0;
+ for (int i = 0, count = additions_root->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = additions_root->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+ v8::String::AsciiValue node_name(node->GetName());
+ if (strcmp(*node_name, "A") == 0) {
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+ CHECK(!found_A);
+ found_A = true;
+ s1_A_id = node->GetId();
+ } else if (strcmp(*node_name, "B") == 0) {
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "b2"));
+ CHECK(!found_B);
+ found_B = true;
+ }
+ }
+ }
+ CHECK(found_A);
+ CHECK(found_B);
+
+ // Verify deletions: ensure that deletion of A was detected.
+ const v8::HeapGraphNode* deletions_root = diff->GetDeletionsRoot();
+ bool found_A_del = false;
+ uint64_t s2_A_id = 0;
+ for (int i = 0, count = deletions_root->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = deletions_root->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+ v8::String::AsciiValue node_name(node->GetName());
+ if (strcmp(*node_name, "A") == 0) {
+ CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+ CHECK(!found_A_del);
+ found_A_del = true;
+ s2_A_id = node->GetId();
+ }
+ }
+ }
+ CHECK(found_A_del);
+ CHECK_NE_UINT64_T(0, s1_A_id);
+ CHECK(s1_A_id != s2_A_id);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 195fef4..01f23aa 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -77,7 +77,6 @@
CodeDesc desc;
assm.GetCode(&desc);
Object* code = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(code->IsCode());
@@ -91,7 +90,6 @@
}
Object* copy = Heap::CreateCode(desc,
- NULL,
Code::ComputeFlags(Code::STUB),
Handle<Object>(Heap::undefined_value()));
CHECK(copy->IsCode());
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index e461349..5add082 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -47,27 +47,6 @@
# We do not have a global object called 'global' as required by tests.
chapter15/15.1: FAIL_OK
-# NOT IMPLEMENTED: seal
-chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
-# NOT IMPLEMENTED: freeze
-chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
-# NOT IMPLEMENTED: isSealed
-chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
-# NOT IMPLEMENTED: isFrozen
-chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
-
-# NOT IMPLEMENTED: seal
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: UNIMPLEMENTED
-
-# NOT IMPLEMENTED: freeze
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: UNIMPLEMENTED
-
-# NOT IMPLEMENTED: isSealed
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: UNIMPLEMENTED
-
-# NOT IMPLEMENTED: isFrozen
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: UNIMPLEMENTED
-
# NOT IMPLEMENTED: bind
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index cab7eb8..613d37d 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -94,7 +94,7 @@
}
return doo;
}
-
+
assertEquals("42foofishhorse", f.apply(this, arr), "apply to this");
function s() {
@@ -112,28 +112,13 @@
return arguments.length + arguments[arguments.length - 1];
}
-var stack_corner_case_failure = false;
-
for (var j = 1; j < 0x40000000; j <<= 1) {
try {
var a = new Array(j);
a[j - 1] = 42;
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
- if (e.toString().indexOf("Maximum call stack size exceeded") != -1) {
- // For some combinations of build settings, it may be the case that the
- // stack here is just tall enough to contain the array whose size is
- // specified by j but is not tall enough to contain the activation
- // record for the apply call. Allow one such corner case through,
- // checking that the length check will do the right thing for an array
- // the next size up.
- assertEquals(false, stack_corner_case_failure);
- stack_corner_case_failure = true;
- continue;
- }
- assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
- "exception does not contain Function.prototype.apply: " +
- e.toString());
+ assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1);
for (; j < 0x40000000; j <<= 1) {
var caught = false;
try {
@@ -143,9 +128,7 @@
assertUnreachable("Apply of array with length " + a.length +
" should have thrown");
} catch (e) {
- assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
- "exception does not contain Function.prototype.apply [" +
- "length = " + j + "]: " + e.toString());
+ assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1);
caught = true;
}
assertTrue(caught, "exception not caught");
diff --git a/test/mjsunit/call-stub.js b/test/mjsunit/call-stub.js
index a9132a6..9d11649 100644
--- a/test/mjsunit/call-stub.js
+++ b/test/mjsunit/call-stub.js
@@ -49,3 +49,18 @@
}
assertEquals(i < 50 || i >= 70 ? 1 : 2, h.m());
}
+
+
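+// Splitting a string yields a fresh, non-internalized string, so 'nonsymbol'
+// is not a symbol and the calls below exercise the call stub's non-symbol
+// property lookup path (explanatory note; the intent is inferred from the
+// test, not stated in it).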
+var nonsymbol = 'wwwww '.split(' ')[0];
+Hash.prototype.wwwww = Hash.prototype.m;
+
+for (var i = 1; i < 100; i++) {
+ if (i == 50) {
+ h[nonsymbol] = function() {
+ return 2;
+ };
+ } else if (i == 70) {
+ delete h[nonsymbol];
+ }
+ assertEquals(i < 50 || i >= 70 ? 1 : 2, h.wwwww());
+}
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 85457cd..945b662 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -85,7 +85,7 @@
};
assertEquals(null, n4.toJSON());
-assertEquals(Object.prototype, JSON.__proto__);
+assertTrue(Object.prototype === JSON.__proto__);
assertEquals("[object JSON]", Object.prototype.toString.call(JSON));
// DontEnum
@@ -313,3 +313,7 @@
var x = 0;
eval("(1); x++; (1)");
TestInvalid('1); x++; (1');
+
+// Test string conversion of argument.
+var o = { toString: function() { return "42"; } };
+assertEquals(42, JSON.parse(o));
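+// Illustrative follow-up (an addition, assuming ES5 ToString semantics):
+// primitives are converted the same way, so a number argument round-trips.
+assertEquals(42, JSON.parse(42));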
diff --git a/test/mjsunit/object-freeze.js b/test/mjsunit/object-freeze.js
new file mode 100644
index 0000000..5ab45e1
--- /dev/null
+++ b/test/mjsunit/object-freeze.js
@@ -0,0 +1,193 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.freeze and Object.isFrozen methods - ES 15.2.3.9 and
+// ES 15.2.3.12
+
+
+// Test that we throw an error if the argument is not an object.
+var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
+for (var key in non_objects) {
+ try {
+ Object.freeze(non_objects[key]);
+ assertUnreachable();
+ } catch(e) {
+ assertTrue(/Object.freeze called on non-object/.test(e));
+ }
+}
+
+for (var key in non_objects) {
+ try {
+ Object.isFrozen(non_objects[key]);
+ assertUnreachable();
+ } catch(e) {
+ assertTrue(/Object.isFrozen called on non-object/.test(e));
+ }
+}
+
+// Test normal data properties.
+var obj = { x: 42, z: 'foobar' };
+var desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals('foobar', desc.value);
+
+assertTrue(Object.isExtensible(obj));
+assertFalse(Object.isFrozen(obj));
+
+Object.freeze(obj);
+
+// Make sure we are no longer extensible.
+assertFalse(Object.isExtensible(obj));
+assertTrue(Object.isFrozen(obj));
+
+try {
+ obj.foo = 42;
+ assertUnreachable();
+} catch(e) {
+ assertTrue(/object is not extensible/.test(e));
+}
+
+desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertFalse(desc.writable);
+assertFalse(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertFalse(desc.writable);
+assertFalse(desc.configurable);
+assertEquals("foobar", desc.value);
+
+// Make sure that even if we try to overwrite a value that is not writable,
+// it is not changed.
+obj.x = "tete";
+assertEquals(42, obj.x);
+obj.x = { get: function() { return 43; }, set: function() {} };
+assertEquals(42, obj.x);
+
+// Test on accessors.
+var obj2 = {};
+function get() { return 43; };
+function set() {};
+Object.defineProperty(obj2, 'x', { get: get, set: set, configurable: true });
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertTrue(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+assertTrue(Object.isExtensible(obj2));
+assertFalse(Object.isFrozen(obj2));
+Object.freeze(obj2);
+assertTrue(Object.isFrozen(obj2));
+assertFalse(Object.isExtensible(obj2));
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertFalse(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+try {
+ obj2.foo = 42;
+ assertUnreachable();
+} catch(e) {
+ assertTrue(/object is not extensible/.test(e));
+}
+
+
+// Test freeze on arrays.
+var arr = new Array(42,43);
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(43, desc.value);
+
+assertTrue(Object.isExtensible(arr));
+assertFalse(Object.isFrozen(arr));
+Object.freeze(arr);
+assertTrue(Object.isFrozen(arr));
+assertFalse(Object.isExtensible(arr));
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertFalse(desc.configurable);
+assertFalse(desc.writable);
+assertEquals(43, desc.value);
+
+arr[0] = 'foo';
+
+// The write is ignored because the frozen array's elements are not writable.
+assertEquals(42, arr[0]);
+
+
+// Test that isFrozen returns the correct value even if configurable has been
+// set to false on all properties manually and the extensible flag has also
+// been set to false manually.
+var obj3 = { x: 42, y: 'foo' };
+
+assertFalse(Object.isFrozen(obj3));
+
+Object.defineProperty(obj3, 'x', {configurable: false, writable: false});
+Object.defineProperty(obj3, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj3);
+
+assertTrue(Object.isFrozen(obj3));
+
+
+// Make sure that an object whose properties are all non-configurable, but
+// where one property is still writable, is not classified as frozen.
+var obj4 = {};
+Object.defineProperty(obj4, 'x', {configurable: false, writable: true});
+Object.defineProperty(obj4, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj4);
+
+assertFalse(Object.isFrozen(obj4));
+
+// Make sure that an object whose properties are all non-writable, but where
+// one property is still configurable, is not classified as frozen.
+var obj5 = {};
+Object.defineProperty(obj5, 'x', {configurable: true, writable: false});
+Object.defineProperty(obj5, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj5);
+
+assertFalse(Object.isFrozen(obj5));
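+
+
+// A minimal sketch of the predicate exercised above (illustrative only --
+// the real Object.isFrozen is native): an object is frozen iff it is
+// non-extensible and every own property is non-configurable and, for data
+// properties, non-writable.  Assumes Object.getOwnPropertyNames is
+// available.
+function sketchIsFrozen(o) {
+  if (Object.isExtensible(o)) return false;
+  var names = Object.getOwnPropertyNames(o);
+  for (var i = 0; i < names.length; i++) {
+    var d = Object.getOwnPropertyDescriptor(o, names[i]);
+    // Any configurable property breaks frozenness.
+    if (d.configurable) return false;
+    // Data properties (those with a 'value') must also be non-writable.
+    if ('value' in d && d.writable) return false;
+  }
+  return true;
+}
+
+assertTrue(sketchIsFrozen(obj3));
+assertFalse(sketchIsFrozen(obj4));
+assertFalse(sketchIsFrozen(obj5));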
diff --git a/test/mjsunit/object-seal.js b/test/mjsunit/object-seal.js
new file mode 100644
index 0000000..896411c
--- /dev/null
+++ b/test/mjsunit/object-seal.js
@@ -0,0 +1,195 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.seal and Object.isSealed methods - ES 15.2.3.8 and
+// ES 15.2.3.11
+
+
+// Test that we throw an error if the argument is not an object.
+var non_objects = new Array(undefined, null, 1, -1, 0, 42.43);
+for (var key in non_objects) {
+ try {
+ Object.seal(non_objects[key]);
+ assertUnreachable();
+ } catch(e) {
+ assertTrue(/Object.seal called on non-object/.test(e));
+ }
+}
+
+for (var key in non_objects) {
+ try {
+ Object.isSealed(non_objects[key]);
+ assertUnreachable();
+ } catch(e) {
+ assertTrue(/Object.isSealed called on non-object/.test(e));
+ }
+}
+
+// Test normal data properties.
+var obj = { x: 42, z: 'foobar' };
+var desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertTrue(desc.writable);
+assertTrue(desc.configurable);
+assertEquals('foobar', desc.value);
+
+assertTrue(Object.isExtensible(obj));
+assertFalse(Object.isSealed(obj));
+
+Object.seal(obj);
+
+// Make sure we are no longer extensible.
+assertFalse(Object.isExtensible(obj));
+assertTrue(Object.isSealed(obj));
+
+// The object should not be frozen, since its data properties are
+// still writable.
+assertFalse(Object.isFrozen(obj));
+
+// We should not allow new properties to be added.
+try {
+ obj.foo = 42;
+ assertUnreachable();
+} catch(e) {
+ assertTrue(/object is not extensible/.test(e));
+}
+
+desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.writable);
+assertFalse(desc.configurable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(obj, 'z');
+assertTrue(desc.writable);
+assertFalse(desc.configurable);
+assertEquals("foobar", desc.value);
+
+// Since writable is not affected by seal, we should still be able to
+// update the values.
+obj.x = 43;
+assertEquals(43, obj.x);
+
+// Test on accessors.
+var obj2 = {};
+function get() { return 43; };
+function set() {};
+Object.defineProperty(obj2, 'x', { get: get, set: set, configurable: true });
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertTrue(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+assertTrue(Object.isExtensible(obj2));
+assertFalse(Object.isSealed(obj2));
+Object.seal(obj2);
+
+// Since this is an accessor property, the object is now effectively both
+// sealed and frozen (accessors have no writable attribute).
+assertTrue(Object.isFrozen(obj2));
+assertFalse(Object.isExtensible(obj2));
+assertTrue(Object.isSealed(obj2));
+
+desc = Object.getOwnPropertyDescriptor(obj2, 'x');
+assertFalse(desc.configurable);
+assertEquals(undefined, desc.value);
+assertEquals(set, desc.set);
+assertEquals(get, desc.get);
+
+try {
+ obj2.foo = 42;
+ assertUnreachable();
+} catch(e) {
+ assertTrue(/object is not extensible/.test(e));
+}
+
+
+// Test seal on arrays.
+var arr = new Array(42,43);
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(43, desc.value);
+
+assertTrue(Object.isExtensible(arr));
+assertFalse(Object.isSealed(arr));
+Object.seal(arr);
+assertTrue(Object.isSealed(arr));
+assertFalse(Object.isExtensible(arr));
+// Since the values in the array are still writable, this object
+// is not frozen.
+assertFalse(Object.isFrozen(arr));
+
+desc = Object.getOwnPropertyDescriptor(arr, '0');
+assertFalse(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(42, desc.value);
+
+desc = Object.getOwnPropertyDescriptor(arr, '1');
+assertFalse(desc.configurable);
+assertTrue(desc.writable);
+assertEquals(43, desc.value);
+
+arr[0] = 'foo';
+
+// We should be able to overwrite the existing value.
+assertEquals('foo', arr[0]);
+
+
+// Test that isSealed returns the correct value even if configurable
+// has been set to false on all properties manually and the extensible
+// flag has also been set to false manually.
+var obj3 = { x: 42, y: 'foo' };
+
+assertFalse(Object.isSealed(obj3));
+
+Object.defineProperty(obj3, 'x', {configurable: false, writable: true});
+Object.defineProperty(obj3, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj3);
+
+assertTrue(Object.isSealed(obj3));
+
+
+// Make sure that an object that has a configurable property
+// is not classified as sealed.
+var obj4 = {};
+Object.defineProperty(obj4, 'x', {configurable: true, writable: false});
+Object.defineProperty(obj4, 'y', {configurable: false, writable: false});
+Object.preventExtensions(obj4);
+
+assertFalse(Object.isSealed(obj4));
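+
+
+// A minimal sketch of the predicate exercised above (illustrative only --
+// the real Object.isSealed is native): an object is sealed iff it is
+// non-extensible and every own property is non-configurable; writability
+// does not matter.  Assumes Object.getOwnPropertyNames is available.
+function sketchIsSealed(o) {
+  if (Object.isExtensible(o)) return false;
+  var names = Object.getOwnPropertyNames(o);
+  for (var i = 0; i < names.length; i++) {
+    // Any configurable property breaks sealedness.
+    if (Object.getOwnPropertyDescriptor(o, names[i]).configurable) {
+      return false;
+    }
+  }
+  return true;
+}
+
+assertTrue(sketchIsSealed(obj3));
+assertFalse(sketchIsSealed(obj4));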
diff --git a/test/mjsunit/regress/regress-r4998.js b/test/mjsunit/regress/regress-r4998.js
new file mode 100644
index 0000000..9cf3371
--- /dev/null
+++ b/test/mjsunit/regress/regress-r4998.js
@@ -0,0 +1,94 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test for a broken fast-smi-loop that does not save the incremented value
+// of the loop index.  If this test fails, it loops forever and times out.
+
+// Flags: --nofull-compiler
+
+// Calling foo() spills the virtual frame.
+function foo() {
+ return;
+}
+
+function bar() {
+ var x1 = 3;
+ var x2 = 3;
+ var x3 = 3;
+ var x4 = 3;
+ var x5 = 3;
+ var x6 = 3;
+ var x7 = 3;
+ var x8 = 3;
+ var x9 = 3;
+ var x10 = 3;
+ var x11 = 3;
+ var x12 = 3;
+ var x13 = 3;
+
+ foo();
+
+ x1 = 257;
+ x2 = 258;
+ x3 = 259;
+ x4 = 260;
+ x5 = 261;
+ x6 = 262;
+ x7 = 263;
+ x8 = 264;
+ x9 = 265;
+ x10 = 266;
+ x11 = 267;
+ x12 = 268;
+ x13 = 269;
+
+  // The loop variable x7 is initialized to 3, and then MakeMergeable is
+  // called on the virtual frame.  MakeMergeable forces the loop variable
+  // x7 to be spilled, so it is marked as synced.  The back edge then
+  // merges its virtual frame, which incorrectly claims that x7 is synced,
+  // and does not save the modified value.
+ for (x7 = 3; x7 < 10; ++x7) {
+ foo();
+ }
+}
+
+bar();
+
+function aliasing() {
+ var x = 3;
+ var j;
+ for (j = 7; j < 11; ++j) {
+ x = j;
+ }
+
+ assertEquals(10, x);
+ assertEquals(11, j);
+}
+
+aliasing();