Update V8 to r6768 as required by WebKit r78450
Change-Id: Ib8868ff7147a76547a8d1d85f257ebe8546a3d3f
diff --git a/AUTHORS b/AUTHORS
index ea5b93e..92b69cb 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -9,6 +9,7 @@
Hewlett-Packard Development Company, LP
Alexander Botero-Lowry <alexbl@FreeBSD.org>
+Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Bert Belder <bertbelder@gmail.com>
@@ -26,11 +27,14 @@
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Michael Smith <mike@w3.org>
+Mike Gilbert <floppymaster@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
+Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
+Zaheer Ahmad <zahmad@codeaurora.org>
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 6e91669..a976a48 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -60,7 +60,6 @@
src/messages.cc \
src/objects.cc \
src/objects-visiting.cc \
- src/oprofile-agent.cc \
src/parser.cc \
src/preparse-data.cc \
src/preparser.cc \
diff --git a/ChangeLog b/ChangeLog
index c9ff297..5d2af9c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,121 @@
+2011-02-14: Version 3.1.4
+
+        Fixed an incorrect comparison of prototypes of the global object
+        (issue 1082).
+
+ Fixed a bug in optimizing calls to global functions (issue 1106).
+
+ Made optimized Function.prototype.apply safe for non-JSObject first
+ arguments (issue 1128).
+
+ Fixed an error related to element accessors on Object.prototype and
+ parser errors (issue 1130).
+
+ Fixed a bug in sorting an array with large array indices (issue 1131).
+
+ Properly treat exceptions thrown while compiling (issue 1132).
+
+ Fixed bug in register requirements for function.apply (issue 1133).
+
+ Fixed a representation change bug in the Hydrogen graph construction
+ (issue 1134).
+
+ Fixed the semantics of delete on parameters (issue 1136).
+
+        Fixed an optimizer bug related to moving instructions with side effects
+        (issue 1138).
+
+ Added support for the global object in Object.keys (issue 1150).
+
+        Fixed incorrect value for Math.LOG10E
+        (issue http://code.google.com/p/chromium/issues/detail?id=72555).
+
+ Performance improvements on the IA32 platform.
+
+        Implemented assignment to undefined reference in ES5 Strict Mode.
+
+
+2011-02-09: Version 3.1.3
+
+ Fixed a bug triggered by functions with huge numbers of declared
+ arguments.
+
+ Fixed zap value aliasing a real object - debug mode only (issue 866).
+
+ Fixed issue where Array.prototype.__proto__ had been set to null
+ (issue 1121).
+
+ Fixed stability bugs in Crankshaft for x86.
+
+
+2011-02-07: Version 3.1.2
+
+ Added better security checks when accessing properties via
+ Object.getOwnPropertyDescriptor.
+
+ Fixed bug in Object.defineProperty and related access bugs (issues
+ 992, 1083 and 1092).
+
+ Added LICENSE.v8, LICENSE.strongtalk and LICENSE.valgrind to ease
+ copyright notice generation for embedders.
+
+
+2011-02-02: Version 3.1.1
+
+ Perform security checks before fetching the value in
+ Object.getOwnPropertyDescriptor.
+
+ Fixed a bug in Array.prototype.splice triggered by passing no
+ arguments.
+
+ Fixed bugs in -0 in arithmetic and in Math.pow.
+
+ Fixed bugs in the register allocator and in switching from optimized
+ to unoptimized code.
+
+
+2011-01-31: Version 3.1.0
+
+ Performance improvements on all platforms.
+
+
+2011-01-28: Version 3.0.12
+
+ Added support for strict mode parameter and object property
+ validation.
+
+ Fixed a couple of crash bugs.
+
+
+2011-01-25: Version 3.0.11
+
+ Fixed a bug in deletion of lookup slots that could cause global
+ variables to be accidentally deleted (http://crbug.com/70066).
+
+ Added support for strict mode octal literal verification.
+
+ Fixed a couple of crash bugs (issues 1070 and 1071).
+
+
+2011-01-24: Version 3.0.10
+
+ Fixed External::Wrap for 64-bit addresses (issue 1037).
+
+ Fixed incorrect .arguments variable proxy handling in the full
+ code generator (issue 1060).
+
+ Introduced partial strict mode support.
+
+ Changed formatting of recursive error messages to match Firefox and
+ Safari (issue http://crbug.com/70334).
+
+ Fixed incorrect rounding for float-to-integer conversions for external
+ array types, which implement the Typed Array spec
+ (issue http://crbug.com/50972).
+
+ Performance improvements on the IA32 platform.
+
+
2011-01-19: Version 3.0.9
Added basic GDB JIT Interface integration.
diff --git a/SConstruct b/SConstruct
index e84db84..017bcad 100644
--- a/SConstruct
+++ b/SConstruct
@@ -32,7 +32,7 @@
from os.path import join, dirname, abspath
from types import DictType, StringTypes
root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
+sys.path.insert(0, join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
@@ -127,6 +127,10 @@
},
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
+ },
+ 'liveobjectlist:on': {
+ 'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
+ 'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
}
},
'gcc': {
@@ -230,9 +234,6 @@
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
- 'prof:oprofile': {
- 'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
- },
'gdbjit:on': {
'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
}
@@ -326,7 +327,7 @@
},
'msvc': {
'all': {
- 'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+ 'WARNINGFLAGS': ['/W3', '/WX', '/wd4351', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
@@ -535,10 +536,6 @@
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
- 'prof:oprofile': {
- 'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
- 'LIBS': ['opagent']
- }
},
'msvc': {
'all': {
@@ -708,7 +705,7 @@
'help': 'build using snapshots for faster start-up'
},
'prof': {
- 'values': ['on', 'off', 'oprofile'],
+ 'values': ['on', 'off'],
'default': 'off',
'help': 'enable profiling of build target'
},
@@ -752,6 +749,11 @@
'default': 'off',
'help': 'enable inspector features'
},
+ 'liveobjectlist': {
+ 'values': ['on', 'off'],
+ 'default': 'off',
+ 'help': 'enable live object list features in the debugger'
+ },
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -890,8 +892,6 @@
Abort("Profiling on windows only supported for static library.")
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
- if env['prof'] == 'oprofile' and env['os'] != 'linux':
- Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
@@ -1009,6 +1009,13 @@
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
+ if options['liveobjectlist'] == 'on':
+ if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
+ # Print a warning that liveobjectlist will implicitly enable the debugger
+ print "Warning: forcing debuggersupport on for liveobjectlist"
+ options['debuggersupport'] = 'on'
+ options['inspector'] = 'on'
+ options['objectprint'] = 'on'
def ParseEnvOverrides(arg, imports):
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index ee51eb7..07693bd 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,6 +1,6 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/10.0.648.0/DEPS
-http://v8.googlecode.com/svn/trunk@6387 plus a partial cherry-pick to fix Android build ...
+http://src.chromium.org/svn/releases/11.0.672.0/DEPS
+http://v8.googlecode.com/svn/trunk@6768 plus a partial cherry-pick to fix Android build ...
- r7077 - CreateThread() in src/platform-linux.cc
diff --git a/include/v8.h b/include/v8.h
index 7d18107..83a5744 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -462,7 +462,6 @@
void Leave();
-
internal::Object** prev_next_;
internal::Object** prev_limit_;
@@ -3053,7 +3052,22 @@
*/
class V8EXPORT Context {
public:
- /** Returns the global object of the context. */
+  /**
+   * Returns the global proxy object, or the global object itself for
+   * detached contexts.
+   *
+   * The global proxy object is a thin wrapper whose prototype points to
+   * the actual context's global object, which holds properties such as
+   * Object. This indirection exists for security reasons (for more details
+   * see https://wiki.mozilla.org/Gecko:SplitWindow).
+   *
+   * Please note that changing the prototype of the global proxy object
+   * will most likely break the VM; v8 expects only the global object as
+   * the prototype of the global proxy object.
+   *
+   * If DetachGlobal() has been invoked, Global() returns the actual global
+   * object until the global is reattached with ReattachGlobal().
+   */
Local<Object> Global();
/**
@@ -3367,7 +3381,7 @@
// For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
// with a plain reinterpret_cast.
- static const intptr_t kEncodablePointerMask = 0x1;
+ static const uintptr_t kEncodablePointerMask = 0x1;
static const int kPointerToSmiShift = 0;
};
@@ -3387,8 +3401,8 @@
// It might be not enough to cover stack allocated objects on some platforms.
static const int kPointerAlignment = 3;
- static const intptr_t kEncodablePointerMask =
- ~(intptr_t(0xffffffff) << kPointerAlignment);
+ static const uintptr_t kEncodablePointerMask =
+ ~(uintptr_t(0xffffffff) << kPointerAlignment);
static const int kPointerToSmiShift =
kSmiTagSize + kSmiShiftSize - kPointerAlignment;
@@ -3397,7 +3411,7 @@
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const intptr_t kEncodablePointerMask =
+const uintptr_t kEncodablePointerMask =
PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
@@ -3433,7 +3447,7 @@
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
- static const int kJSObjectType = 0x9f;
+ static const int kJSObjectType = 0xa0;
static const int kFirstNonstringType = 0x80;
static const int kProxyType = 0x85;
@@ -3457,7 +3471,7 @@
}
static inline void* GetExternalPointerFromSmi(internal::Object* value) {
- const intptr_t address = reinterpret_cast<intptr_t>(value);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(value);
return reinterpret_cast<void*>(address >> kPointerToSmiShift);
}
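A note on the Global() contract documented above: the comment implies that the handle an embedder saves from Global() before detaching is the proxy, and that it is what gets reattached. A minimal usage sketch follows (v8 3.1-era API; the assumption that ReattachGlobal() takes the proxy handle saved before DetachGlobal() follows contemporary embedder usage, not anything in this diff):

    // Sketch only: exercises the Global()/DetachGlobal()/ReattachGlobal()
    // contract documented in the comment above.
    v8::Persistent<v8::Context> context = v8::Context::New();
    {
      v8::HandleScope scope;
      // Before detaching, Global() returns the global proxy object.
      v8::Local<v8::Object> proxy = context->Global();
      context->DetachGlobal();
      // While detached, Global() returns the actual global object.
      v8::Local<v8::Object> global = context->Global();
      (void)global;
      // Reattaching restores proxy-returning behavior (argument is assumed
      // to be the saved proxy handle).
      context->ReattachGlobal(proxy);
    }
    context.Dispose();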
diff --git a/src/SConscript b/src/SConscript
index 708edef..c3561be 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -89,13 +89,14 @@
lithium-allocator.cc
lithium.cc
liveedit.cc
+ liveobjectlist.cc
log-utils.cc
log.cc
mark-compact.cc
messages.cc
objects.cc
+ objects-printer.cc
objects-visiting.cc
- oprofile-agent.cc
parser.cc
preparser.cc
preparse-data.cc
@@ -215,8 +216,9 @@
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
- x64/lithium-x64.cc
x64/lithium-codegen-x64.cc
+ x64/lithium-gap-resolver-x64.cc
+ x64/lithium-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
@@ -235,10 +237,8 @@
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
- 'objects-debug.cc', 'objects-printer.cc', 'prettyprinter.cc',
- 'regexp-macro-assembler-tracer.cc'
- ],
- 'objectprint:on': ['objects-printer.cc']
+ 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
+ ]
}
diff --git a/src/accessors.cc b/src/accessors.cc
index c7d9cfe..2b205d5 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -675,46 +675,36 @@
int inlined_frame_index,
Vector<SlotRef>* args_slots) {
AssertNoAllocation no_gc;
-
int deopt_index = AstNode::kNoNumber;
-
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
-
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
-
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
-
USE(frame_count);
ASSERT(frame_count > inlined_frame_index);
-
int frames_to_skip = inlined_frame_index;
while (true) {
opcode = static_cast<Translation::Opcode>(it.Next());
-
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
-
if (opcode == Translation::FRAME) {
if (frames_to_skip == 0) {
- // We reached frame corresponding to inlined function in question.
- // Process translation commands for arguments.
-
- // Skip translation command for receiver.
+ // We reached the frame corresponding to the inlined function
+ // in question. Process the translation commands for the
+ // arguments.
+ //
+ // Skip the translation command for the receiver.
it.Skip(Translation::NumberOfOperandsFor(
static_cast<Translation::Opcode>(it.Next())));
-
// Compute slots for arguments.
for (int i = 0; i < args_slots->length(); ++i) {
(*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
}
-
return;
}
-
frames_to_skip--;
}
}
@@ -727,16 +717,11 @@
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
-
int args_count = inlined_function->shared()->formal_parameter_count();
-
ScopedVector<SlotRef> args_slots(args_count);
-
ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
-
Handle<JSObject> arguments =
Factory::NewArgumentsObject(inlined_function, args_count);
-
Handle<FixedArray> array = Factory::NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
@@ -766,39 +751,43 @@
if (functions[i] != *function) continue;
if (i > 0) {
- // Function in question was inlined.
+ // The function in question was inlined. Inlined functions have the
+ // correct number of arguments and no allocated arguments object, so
+ // we can construct a fresh one by interpreting the function's
+ // deoptimization input data.
return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
- } else {
+ }
+
+ if (!frame->is_optimized()) {
// If there is an arguments variable in the stack, we return that.
- int index = function->shared()->scope_info()->
- StackSlotIndex(Heap::arguments_symbol());
+ Handle<SerializedScopeInfo> info(function->shared()->scope_info());
+ int index = info->StackSlotIndex(Heap::arguments_symbol());
if (index >= 0) {
- Handle<Object> arguments =
- Handle<Object>(frame->GetExpression(index));
+ Handle<Object> arguments(frame->GetExpression(index));
if (!arguments->IsArgumentsMarker()) return *arguments;
}
-
- // If there isn't an arguments variable in the stack, we need to
- // find the frame that holds the actual arguments passed to the
- // function on the stack.
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
-
- // Get the number of arguments and construct an arguments object
- // mirror for the right frame.
- const int length = frame->GetProvidedParametersCount();
- Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
- length);
- Handle<FixedArray> array = Factory::NewFixedArray(length);
-
- // Copy the parameters to the arguments object.
- ASSERT(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
- arguments->set_elements(*array);
-
- // Return the freshly allocated arguments object.
- return *arguments;
}
+
+ // If there is no arguments variable in the stack or we have an
+ // optimized frame, we find the frame that holds the actual arguments
+ // passed to the function.
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+
+ // Get the number of arguments and construct an arguments object
+ // mirror for the right frame.
+ const int length = frame->GetProvidedParametersCount();
+ Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
+ length);
+ Handle<FixedArray> array = Factory::NewFixedArray(length);
+
+ // Copy the parameters to the arguments object.
+ ASSERT(array->length() == length);
+ for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+ arguments->set_elements(*array);
+
+ // Return the freshly allocated arguments object.
+ return *arguments;
}
functions.Rewind(0);
}
diff --git a/src/api.cc b/src/api.cc
index 073306f..d718c88 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -115,7 +115,9 @@
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
- ENTER_V8;
+#ifdef ENABLE_VMSTATE_TRACKING
+ i::VMState __state__(i::OTHER);
+#endif
API_Fatal(location, message);
}
@@ -668,7 +670,7 @@
void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
- if (IsDeadCheck("v8::Template::SetProperty()")) return;
+ if (IsDeadCheck("v8::Template::Set()")) return;
ENTER_V8;
HandleScope scope;
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
@@ -1478,11 +1480,11 @@
}
ENTER_V8;
HandleScope scope;
- i::Handle<i::JSObject> obj =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.name.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
return scope.Close(Utils::ToLocal(resource_name));
}
@@ -1494,11 +1496,11 @@
}
ENTER_V8;
HandleScope scope;
- i::Handle<i::JSObject> obj =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
i::Handle<i::Object> data(i::Script::cast(script->value())->data());
return scope.Close(Utils::ToLocal(data));
}
@@ -1510,9 +1512,9 @@
}
ENTER_V8;
HandleScope scope;
- i::Handle<i::JSObject> obj =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj = GetProperty(obj, "stackFrames");
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::Object> stackFramesObj(message->stack_frames());
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
i::Handle<i::JSArray> stackTrace =
i::Handle<i::JSArray>::cast(stackFramesObj);
@@ -1552,6 +1554,7 @@
ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo);
ENTER_V8;
HandleScope scope;
+
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result = CallV8HeapFunction("GetLineNumber",
Utils::OpenHandle(this),
@@ -1565,9 +1568,9 @@
if (IsDeadCheck("v8::Message::GetStartPosition()")) return 0;
ENTER_V8;
HandleScope scope;
-
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- return static_cast<int>(GetProperty(data_obj, "startPos")->Number());
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ return message->start_position();
}
@@ -1575,8 +1578,9 @@
if (IsDeadCheck("v8::Message::GetEndPosition()")) return 0;
ENTER_V8;
HandleScope scope;
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- return static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ return message->end_position();
}
@@ -1606,8 +1610,10 @@
data_obj,
&has_pending_exception);
EXCEPTION_BAILOUT_CHECK(0);
- int start = static_cast<int>(GetProperty(data_obj, "startPos")->Number());
- int end = static_cast<int>(GetProperty(data_obj, "endPos")->Number());
+ i::Handle<i::JSMessageObject> message =
+ i::Handle<i::JSMessageObject>::cast(data_obj);
+ int start = message->start_position();
+ int end = message->end_position();
return static_cast<int>(start_col_obj->Number()) + (end - start);
}
@@ -2200,6 +2206,12 @@
ENTER_V8;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  // If both obj and other are JSObjects, compare them by identity
+  // immediately instead of going into the JS builtin, because Invoke
+  // would overwrite a global object receiver with the global proxy.
+ if (obj->IsJSObject() && other->IsJSObject()) {
+ return *obj == *other;
+ }
i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result =
@@ -2649,26 +2661,38 @@
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
- i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
- i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
- int hash_value;
- if (hash->IsSmi()) {
- hash_value = i::Smi::cast(*hash)->value();
- } else {
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = i::V8::Random() & i::Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
- i::SetProperty(hidden_props,
- hash_symbol,
- i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
- static_cast<PropertyAttributes>(None));
+ i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
+ if (!hidden_props_obj->IsJSObject()) {
+ // We failed to create hidden properties. That's a detached
+ // global proxy.
+ ASSERT(hidden_props_obj->IsUndefined());
+ return 0;
}
+ i::Handle<i::JSObject> hidden_props =
+ i::Handle<i::JSObject>::cast(hidden_props_obj);
+ i::Handle<i::String> hash_symbol = i::Factory::identity_hash_symbol();
+ if (hidden_props->HasLocalProperty(*hash_symbol)) {
+ i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
+ CHECK(!hash.is_null());
+ CHECK(hash->IsSmi());
+ return i::Smi::cast(*hash)->value();
+ }
+
+ int hash_value;
+ int attempts = 0;
+ do {
+ // Generate a random 32-bit hash value but limit range to fit
+ // within a smi.
+ hash_value = i::V8::Random() & i::Smi::kMaxValue;
+ attempts++;
+ } while (hash_value == 0 && attempts < 30);
+ hash_value = hash_value != 0 ? hash_value : 1; // never return 0
+ CHECK(!i::SetLocalPropertyIgnoreAttributes(
+ hidden_props,
+ hash_symbol,
+ i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
+ static_cast<PropertyAttributes>(None)).is_null());
+
return hash_value;
}
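The generation loop above maintains a simple invariant: an identity hash is a nonzero value that fits in a smi. A standalone sketch of the same scheme, with std::rand() standing in for i::V8::Random() and an assumed 31-bit smi payload (both illustrative, not V8's actual definitions):

    #include <cstdlib>

    static const int kSmiMaxValue = (1 << 30) - 1;  // assumed smi payload

    int GenerateIdentityHash() {
      int hash_value = 0;
      int attempts = 0;
      do {
        // Generate a random hash value but limit the range to fit
        // within a smi.
        hash_value = std::rand() & kSmiMaxValue;
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  // never return 0
    }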
@@ -2745,9 +2769,9 @@
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
- i::Handle<i::Map> slow_map =
- i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
- self->set_map(*slow_map);
+ i::Handle<i::Map> pixel_array_map =
+ i::Factory::GetPixelArrayElementsMap(i::Handle<i::Map>(self->map()));
+ self->set_map(*pixel_array_map);
self->set_elements(*pixels);
}
@@ -3267,14 +3291,14 @@
static bool CanBeEncodedAsSmi(void* ptr) {
- const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return ((address & i::kEncodablePointerMask) == 0);
}
static i::Smi* EncodeAsSmi(void* ptr) {
ASSERT(CanBeEncodedAsSmi(ptr));
- const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
ASSERT(i::Internals::HasSmiTag(result));
ASSERT_EQ(result, i::Smi::FromInt(result->value()));
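The intptr_t to uintptr_t change here (and in the SmiTagging templates in include/v8.h) matters because the decode path shifts the encoded word right: with a signed type, an address whose encoded form has the top bit set would be sign-extended and decode to the wrong pointer, whereas unsigned shifts are purely logical. A self-contained check, using the 64-bit shift of 29 implied by the SmiTagging math above (the address value itself is made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerToSmiShift = 29;  // 1 + 31 - 3, per 64-bit SmiTagging
      uint64_t address = uint64_t(1) << 34;              // aligned, encodable
      uint64_t encoded = address << kPointerToSmiShift;  // top bit now set

      // Unsigned (logical) right shift recovers the address exactly.
      assert((encoded >> kPointerToSmiShift) == address);

      // A signed shift sign-extends the top bit on the usual two's-complement
      // implementations, yielding a different, invalid address.
      int64_t signed_encoded = static_cast<int64_t>(encoded);
      assert(uint64_t(signed_encoded >> kPointerToSmiShift) != address);
      return 0;
    }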
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 68d32f1..3b81102 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -198,6 +198,8 @@
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -221,6 +223,8 @@
StaticVisitor::VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 11a9c39..fb9bb48 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -213,74 +213,29 @@
// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
- H = 1 << 5, // halfword (or byte)
- S6 = 1 << 6, // signed (or unsigned)
- L = 1 << 20, // load (or store)
- S = 1 << 20, // set condition code (or leave unchanged)
- W = 1 << 21, // writeback base register (or leave unchanged)
- A = 1 << 21, // accumulate in multiply instruction (or not)
- B = 1 << 22, // unsigned byte (or word)
- N = 1 << 22, // long (or short)
- U = 1 << 23, // positive (or negative) offset/index
- P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
- I = 1 << 25, // immediate shifter operand (or not)
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
-
- // Instruction bit masks.
- RdMask = 15 << 12, // in str instruction
- CondMask = 15 << 28,
- CoprocessorMask = 15 << 8,
- OpCodeMask = 15 << 21, // in data-processing instructions
- Imm24Mask = (1 << 24) - 1,
- Off12Mask = (1 << 12) - 1,
- // Reserved condition.
- nv = 15 << 28
-};
-
+// Specific instructions, constants, and masks.
// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
- al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+const Instr kPopInstruction =
+ al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
-static const Instr kPushRegPattern =
+const Instr kPushRegPattern =
al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
-static const Instr kPopRegPattern =
+const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+ B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -292,37 +247,31 @@
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
-const Instr kALUMask = 0x6f * B21;
-const Instr kAddPattern = 0x4 * B21;
-const Instr kSubPattern = 0x2 * B21;
-const Instr kBicPattern = 0xe * B21;
-const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kRdMask = 0x0000f000;
-static const int kRdShift = 12;
-static const Instr kLdrRegFpOffsetPattern =
+const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | fp.code() * B16;
-static const Instr kStrRegFpOffsetPattern =
+const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | fp.code() * B16;
-static const Instr kLdrRegFpNegOffsetPattern =
+const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
-static const Instr kStrRegFpNegOffsetPattern =
+const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | fp.code() * B16;
-static const Instr kLdrStrInstrTypeMask = 0xffff0000;
-static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-static const Instr kLdrStrOffsetMask = 0x00000fff;
+const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+const Instr kLdrStrOffsetMask = 0x00000fff;
+
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
+
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
- // BUG(3245989): disable peephole optimization if crankshaft is enabled.
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
@@ -402,6 +351,11 @@
}
+Condition Assembler::GetCondition(Instr instr) {
+ return Instruction::ConditionField(instr);
+}
+
+
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
@@ -411,7 +365,7 @@
ASSERT(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
- return ((instr & Imm24Mask) << 8) >> 6;
+ return ((instr & kImm24Mask) << 8) >> 6;
}
@@ -423,7 +377,7 @@
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
- int offset = instr & Off12Mask; // Zero extended offset.
+ int offset = instr & kOff12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
@@ -436,7 +390,7 @@
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
@@ -453,7 +407,7 @@
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
@@ -467,13 +421,27 @@
ASSERT(offset >= 0);
ASSERT(is_uint12(offset));
// Set the offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
Register Assembler::GetRd(Instr instr) {
Register reg;
- reg.code_ = ((instr & kRdMask) >> kRdShift);
+ reg.code_ = Instruction::RdValue(instr);
+ return reg;
+}
+
+
+Register Assembler::GetRn(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RnValue(instr);
+ return reg;
+}
+
+
+Register Assembler::GetRm(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RmValue(instr);
return reg;
}
@@ -511,10 +479,39 @@
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & 0x0f7f0000) == 0x051f0000;
+ return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
}
+bool Assembler::IsTstImmediate(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
+ (I | TST | S);
+}
+
+
+bool Assembler::IsCmpRegister(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
+ (CMP | S);
+}
+
+
+bool Assembler::IsCmpImmediate(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
+ (I | CMP | S);
+}
+
+
+Register Assembler::GetCmpImmediateRegister(Instr instr) {
+ ASSERT(IsCmpImmediate(instr));
+ return GetRn(instr);
+}
+
+
+int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
+ ASSERT(IsCmpImmediate(instr));
+ return instr & kOff12Mask;
+}
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -532,13 +529,14 @@
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
// Emitted label constant, not part of a branch.
return instr - (Code::kHeaderSize - kHeapObjectTag);
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & Imm24Mask) << 8) >> 6;
- if ((instr & CondMask) == nv && (instr & B24) != 0) {
+ int imm26 = ((instr & kImm24Mask) << 8) >> 6;
+ if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
+ ((instr & B24) != 0)) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
@@ -548,7 +546,7 @@
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
ASSERT(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
@@ -557,17 +555,17 @@
}
int imm26 = target_pos - (pos + kPcLoadDelta);
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if ((instr & CondMask) == nv) {
+ if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
} else {
ASSERT((imm26 & 3) == 0);
- instr &= ~Imm24Mask;
+ instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & Imm24Mask));
+ instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -582,14 +580,14 @@
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
Instr instr = instr_at(l.pos());
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- int cond = instr & CondMask;
+ Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
- if (cond == nv) {
+ if (cond == kSpecialCondition) {
b = "blx";
c = "";
} else {
@@ -731,14 +729,14 @@
}
} else {
Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == kAddPattern ||
- alu_insn == kSubPattern) {
+ if (alu_insn == ADD ||
+ alu_insn == SUB) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
- } else if (alu_insn == kAndPattern ||
- alu_insn == kBicPattern) {
+ } else if (alu_insn == AND ||
+ alu_insn == BIC) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip;
return true;
@@ -782,7 +780,7 @@
Register rd,
const Operand& x) {
CheckBuffer();
- ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
@@ -794,8 +792,8 @@
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = static_cast<Condition>(instr & CondMask);
- if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ Condition cond = Instruction::ConditionField(instr);
+ if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@@ -836,7 +834,7 @@
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | B | L)) == B26);
+ ASSERT((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -849,8 +847,7 @@
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -869,7 +866,7 @@
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
@@ -883,8 +880,7 @@
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -895,7 +891,7 @@
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
@@ -909,7 +905,7 @@
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
ASSERT(rl != 0);
ASSERT(!rn.is(pc));
emit(instr | rn.code()*B16 | rl);
@@ -919,7 +915,7 @@
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
- (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+ (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
@@ -982,7 +978,7 @@
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+ emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
// Dead code is a good location to emit the constant pool.
@@ -996,7 +992,7 @@
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+ emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@@ -1006,21 +1002,21 @@
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
+ emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
void Assembler::blx(Register target, Condition cond) { // v5 and above
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1028,31 +1024,31 @@
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 0*B21 | s, src1, dst, src2);
+ addrmod1(cond | AND | s, src1, dst, src2);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 1*B21 | s, src1, dst, src2);
+ addrmod1(cond | EOR | s, src1, dst, src2);
}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 2*B21 | s, src1, dst, src2);
+ addrmod1(cond | SUB | s, src1, dst, src2);
}
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 3*B21 | s, src1, dst, src2);
+ addrmod1(cond | RSB | s, src1, dst, src2);
}
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 4*B21 | s, src1, dst, src2);
+ addrmod1(cond | ADD | s, src1, dst, src2);
// Eliminate pattern: push(r), pop()
// str(src, MemOperand(sp, 4, NegPreIndex), al);
@@ -1061,7 +1057,7 @@
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+ (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
@@ -1072,45 +1068,52 @@
void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 5*B21 | s, src1, dst, src2);
+ addrmod1(cond | ADC | s, src1, dst, src2);
}
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 6*B21 | s, src1, dst, src2);
+ addrmod1(cond | SBC | s, src1, dst, src2);
}
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 7*B21 | s, src1, dst, src2);
+ addrmod1(cond | RSC | s, src1, dst, src2);
}
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 8*B21 | S, src1, r0, src2);
+ addrmod1(cond | TST | S, src1, r0, src2);
}
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 9*B21 | S, src1, r0, src2);
+ addrmod1(cond | TEQ | S, src1, r0, src2);
}
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 10*B21 | S, src1, r0, src2);
+ addrmod1(cond | CMP | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp_raw_immediate(
+ Register src, int raw_immediate, Condition cond) {
+ ASSERT(is_uint12(raw_immediate));
+ emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 11*B21 | S, src1, r0, src2);
+ addrmod1(cond | CMN | S, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 12*B21 | s, src1, dst, src2);
+ addrmod1(cond | ORR | s, src1, dst, src2);
}
@@ -1122,7 +1125,7 @@
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | 13*B21 | s, r0, dst, src);
+ addrmod1(cond | MOV | s, r0, dst, src);
}
@@ -1139,12 +1142,12 @@
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 14*B21 | s, src1, dst, src2);
+ addrmod1(cond | BIC | s, src1, dst, src2);
}
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | 15*B21 | s, r0, dst, src);
+ addrmod1(cond | MVN | s, r0, dst, src);
}
@@ -1222,7 +1225,7 @@
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | B4 | src.code());
+ 15*B8 | CLZ | src.code());
}
@@ -1376,7 +1379,7 @@
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
- if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+ if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
@@ -1457,8 +1460,8 @@
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
- if ((mem_write_instr & kRdMask) ==
- (mem_read_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(mem_read_instr)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
@@ -1473,7 +1476,8 @@
// else
// ldr rz, [fp, #-24]
- if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(ldr_instr)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
@@ -1503,22 +1507,23 @@
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
- if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_read_instr) ==
+ Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
- } else if ((mem_write_instr & kRdMask)
- != (ldr_instr & kRdMask)) {
+ } else if (Instruction::RdValue(mem_write_instr) !=
+ Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
- } else if (((mem_read_instr & kRdMask)
- != (ldr_instr & kRdMask)) ||
- ((mem_write_instr & kRdMask)
- == (ldr_instr & kRdMask)) ) {
+ } else if ((Instruction::RdValue(mem_read_instr) !=
+ Instruction::RdValue(ldr_instr)) ||
+ (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(ldr_instr))) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
@@ -1640,24 +1645,26 @@
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
- // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
- // Simulator do not share constants declaration.
ASSERT(code >= kDefaultStopCode);
- static const uint32_t kStopInterruptCode = 1 << 23;
- static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
// The Simulator will handle the stop instruction and get the message address.
// It expects to find the address just after the svc instruction.
BlockConstPoolFor(2);
if (code >= 0) {
- svc(kStopInterruptCode + code, cond);
+ svc(kStopCode + code, cond);
} else {
- svc(kStopInterruptCode + kMaxStopCode, cond);
+ svc(kStopCode + kMaxStopCode, cond);
}
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- ASSERT(cond == al);
- bkpt(0);
+ if (cond != al) {
+ Label skip;
+ b(&skip, NegateCondition(cond));
+ bkpt(0);
+ bind(&skip);
+ } else {
+ bkpt(0);
+ }
#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
svc(0x9f0001, cond);
#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
@@ -1667,7 +1674,7 @@
void Assembler::bkpt(uint32_t imm16) { // v5 and above
ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
@@ -1697,7 +1704,7 @@
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1720,7 +1727,7 @@
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1743,7 +1750,7 @@
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1773,7 +1780,7 @@
CRegister crd,
const MemOperand& src,
LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+ ldc(coproc, crd, src, l, kSpecialCondition);
}
@@ -1782,7 +1789,7 @@
Register rn,
int option,
LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+ ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -1812,7 +1819,7 @@
coproc, CRegister crd,
const MemOperand& dst,
LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+ stc(coproc, crd, dst, l, kSpecialCondition);
}
@@ -1821,7 +1828,7 @@
Register rn,
int option,
LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+ stc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -2171,7 +2178,7 @@
const int dst_code,
const VFPType src_type,
const int src_code,
- Assembler::ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(src_type != dst_type);
int D, Vd, M, Vm;
@@ -2214,7 +2221,7 @@
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
@@ -2223,7 +2230,7 @@
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
@@ -2232,7 +2239,7 @@
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
@@ -2241,7 +2248,7 @@
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
@@ -2250,7 +2257,7 @@
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
@@ -2259,7 +2266,7 @@
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
@@ -2268,13 +2275,21 @@
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode,
+ VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
+void Assembler::vabs(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
+ 0x5*B9 | B8 | 0x3*B6 | src.code());
+}
+
+
void Assembler::vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -2402,7 +2417,7 @@
bool Assembler::IsNop(Instr instr, int type) {
- // Check for mov rx, rx.
+ // Check for mov rx, rx where x = type.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
@@ -2631,7 +2646,7 @@
// Instruction to patch must be a ldr/str [pc, #offset].
// P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
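GetBranchOffset() and target_at() above both lean on the same trick: the 24-bit immediate is shifted to the top of a 32-bit signed word (<< 8) and then arithmetically shifted back by 6, which sign-extends it and multiplies it by 4 in one step (a net left shift of 2). A standalone check of that identity, relying on two's-complement arithmetic shifts exactly as the assembler code does:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kImm24Mask = (1 << 24) - 1;

      int32_t forward = 0x000123;  // small positive branch offset
      assert((((forward & kImm24Mask) << 8) >> 6) == forward * 4);

      // 0xfffff0 is -16 in 24-bit two's complement, so the byte offset is -64.
      int32_t backward = 0xfffff0;
      assert((((backward & kImm24Mask) << 8) >> 6) == -16 * 4);
      return 0;
    }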
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index ad1bdab..3941c84 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
+#include "constants-arm.h"
#include "serialize.h"
namespace v8 {
@@ -300,18 +301,6 @@
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
-// VFP FPSCR constants.
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-
-static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
@@ -372,149 +361,6 @@
};
-// Condition field in instructions.
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- eq = 0 << 28, // Z set equal.
- ne = 1 << 28, // Z clear not equal.
- nz = 1 << 28, // Z clear not zero.
- cs = 2 << 28, // C set carry set.
- hs = 2 << 28, // C set unsigned higher or same.
- cc = 3 << 28, // C clear carry clear.
- lo = 3 << 28, // C clear unsigned lower.
- mi = 4 << 28, // N set negative.
- pl = 5 << 28, // N clear positive or zero.
- vs = 6 << 28, // V set overflow.
- vc = 7 << 28, // V clear no overflow.
- hi = 8 << 28, // C set, Z clear unsigned higher.
- ls = 9 << 28, // C clear or Z set unsigned lower or same.
- ge = 10 << 28, // N == V greater or equal.
- lt = 11 << 28, // N != V less than.
- gt = 12 << 28, // Z clear, N == V greater than.
- le = 13 << 28, // Z set or N != V less then or equal
- al = 14 << 28 // always.
-};
-
-
-// Returns the equivalent of !cc.
-inline Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cc;
- };
-}
-
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
- LSL = 0 << 5,
- LSR = 1 << 5,
- ASR = 2 << 5,
- ROR = 3 << 5,
- RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
- SetCC = 1 << 20, // set condition code
- LeaveCC = 0 << 20 // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
- // bit encoding P U W
- Offset = (8|4|0) << 21, // offset (without writeback to base)
- PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
- PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
- NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
- NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
- NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
- // bit encoding P U W
- da = (0|0|0) << 21, // decrement after
- ia = (0|4|0) << 21, // increment after
- db = (8|0|0) << 21, // decrement before
- ib = (8|4|0) << 21, // increment before
- da_w = (0|0|1) << 21, // decrement after with writeback to base
- ia_w = (0|4|1) << 21, // increment after with writeback to base
- db_w = (8|0|1) << 21, // decrement before with writeback to base
- ib_w = (8|4|1) << 21 // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
- Long = 1 << 22, // long load/store coprocessor
- Short = 0 << 22 // short load/store coprocessor
-};
-
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -658,9 +504,6 @@
};
-typedef int32_t Instr;
-
-
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
-
-extern const Instr kALUMask;
-extern const Instr kAddPattern;
-extern const Instr kSubPattern;
-extern const Instr kAndPattern;
-extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
+
+
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -890,6 +729,7 @@
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
+ void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
@@ -1001,7 +841,6 @@
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
- static const int kDefaultStopCode = -1;
void stop(const char* msg,
Condition cond = al,
int32_t code = kDefaultStopCode);
@@ -1104,39 +943,38 @@
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
- enum ConversionMode {
- FPSCRRounding = 0,
- RoundToZero = 1
- };
void vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
- ConversionMode mode = RoundToZero,
+ VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vabs(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -1262,6 +1100,7 @@
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
+ static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
@@ -1272,6 +1111,8 @@
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
+ static Register GetRn(Instr instr);
+ static Register GetRm(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
@@ -1279,6 +1120,11 @@
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsTstImmediate(Instr instr);
+ static bool IsCmpRegister(Instr instr);
+ static bool IsCmpImmediate(Instr instr);
+ static Register GetCmpImmediateRegister(Instr instr);
+ static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 0210b1b..f14d77a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -190,7 +190,7 @@
// Check whether an empty sized array is requested.
__ tst(array_size, array_size);
- __ b(nz, &not_empty);
+ __ b(ne, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
@@ -566,7 +566,7 @@
// if it's a string already before calling the conversion builtin.
Label convert_argument;
__ bind(&not_cached);
- __ BranchOnSmi(r0, &convert_argument);
+ __ JumpIfSmi(r0, &convert_argument);
// Is it a String?
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -666,7 +666,7 @@
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
- __ b(nz, &rt_call);
+ __ b(ne, &rt_call);
#endif
// Load the initial map and verify that it is in fact a map.
@@ -1156,12 +1156,48 @@
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- __ stop("builtins-arm.cc: NotifyOSR");
+ // For now, we are relying on the fact that Runtime::NotifyOSR
+ // doesn't do any garbage collection which allows us to save/restore
+ // the registers without worrying about which of them contain
+ // pointers. This seems a bit fragile.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
+ __ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- __ stop("builtins-arm.cc: OnStackReplacement");
+ // Probe the CPU to set the supported features, because this builtin
+ // may be called before the initialization performs CPU setup.
+ CpuFeatures::Probe(false);
+
+ // Lookup the function in the JavaScript frame and push it as an
+ // argument to the on-stack replacement function.
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
+
+ // If the result was -1 it means that we couldn't optimize the
+ // function. Just return and continue in the unoptimized version.
+ Label skip;
+ __ cmp(r0, Operand(Smi::FromInt(-1)));
+ __ b(ne, &skip);
+ __ Ret();
+
+ __ bind(&skip);
+ // Untag the AST id and push it on the stack.
+ __ SmiUntag(r0);
+ __ push(r0);
+
+ // Generate the code for doing the frame-to-frame translation using
+ // the deoptimizer infrastructure.
+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+ generator.Generate();
}
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 8589cf0..1e7d558 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,7 +41,7 @@
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
+ Condition cond,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
@@ -49,12 +49,32 @@
Label* lhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r0.
+ Label check_heap_number, call_builtin;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &check_heap_number);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &call_builtin);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
+}
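// Reviewer note: the tst against kSmiTagMask above tests V8's 32-bit
// pointer tagging; a hedged sketch of the invariant being checked
// (illustrative names, not the real constants):
#include <cstdint>
inline bool IsSmiTaggedSketch(intptr_t value) {
  // A smi stores payload << 1 with a 0 low bit; heap object pointers carry
  // a 1 low bit, so testing bit 0 distinguishes the two.
  return (value & 1) == 0;
}
inline int32_t SmiPayloadSketch(intptr_t value) {
  return static_cast<int32_t>(value >> 1);  // arithmetic shift untags
}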
+
+
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
@@ -344,6 +364,203 @@
}
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum Destination {
+ kVFPRegisters,
+ kCoreRegisters
+ };
+
+
+ // Loads smis from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end up
+ // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+ // destination is floating point registers VFP3 must be supported. If core
+ // registers are requested when VFP3 is supported d6 and d7 will be
+ // scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+ // Loads objects from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end up
+ // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+ // destination is floating point registers VFP3 must be supported. If core
+ // registers are requested when VFP3 is supported d6 and d7 will still be
+ // scratched. If either r0 or r1 is not a number (not smi and not heap
+ // number object) the not_number label is jumped to with r0 and r1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
+ // Loads the number from object into dst as a 32-bit integer if possible. If
+ // the object is not a 32-bit integer control continues at the label
+ // not_int32. If VFP is supported double_scratch is used but not scratch2.
+ static void LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32);
+
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(d7.high(), scratch1);
+ __ vcvt_f64_s32(d7, d7.high());
+ __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(d6.high(), scratch1);
+ __ vcvt_f64_s32(d6, d6.high());
+ if (destination == kCoreRegisters) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(scratch1, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
+ __ mov(scratch1, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (r0) to d7 or r2/r3.
+ LoadNumber(masm, destination,
+ r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (r1) to d6 or r0/r1.
+ LoadNumber(masm, destination,
+ r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
+ Label is_smi, done;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber to double register.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(dst, scratch1, HeapNumber::kValueOffset);
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ }
+ __ jmp(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi to double using VFP instructions.
+ __ SmiUntag(scratch1, object);
+ __ vmov(dst.high(), scratch1);
+ __ vcvt_f64_s32(dst, dst.high());
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ vmov(dst1, dst2, dst);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write smi to dst1 and dst2 double format.
+ __ mov(scratch1, Operand(object));
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ DwVfpRegister double_scratch,
+ Label* not_int32) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi, done;
+ __ JumpIfSmi(object, &is_smi);
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ cmp(scratch1, heap_number_map);
+ __ b(ne, not_int32);
+ __ ConvertToInt32(
+ object, dst, scratch1, scratch2, double_scratch, not_int32);
+ __ jmp(&done);
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
+}
+
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -395,7 +612,7 @@
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
+ Condition cond,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
@@ -404,31 +621,31 @@
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
- if (cc != eq || !never_nan_nan) {
+ if (cond != eq || !never_nan_nan) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
+ if (cond == lt || cond == gt) {
__ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
+ if (cond != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
+ if (cond == le || cond == ge) {
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r2);
__ b(ne, &return_equal);
- if (cc == le) {
+ if (cond == le) {
// undefined <= undefined should fail.
__ mov(r0, Operand(GREATER));
} else {
@@ -442,20 +659,20 @@
}
__ bind(&return_equal);
- if (cc == lt) {
+ if (cond == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cc == gt) {
+ } else if (cond == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
__ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ Ret();
- if (cc != eq || !never_nan_nan) {
+ if (cond != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
// for NaN.
- if (cc != lt && cc != gt) {
+ if (cond != lt && cond != gt) {
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
@@ -479,10 +696,10 @@
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
// not (it's a NaN). For <= and >= we need to load r0 with the failing
// value if it's a NaN.
- if (cc != eq) {
+ if (cond != eq) {
// All-zero means Infinity means equal.
__ Ret(eq);
- if (cc == le) {
+ if (cond == le) {
__ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
@@ -589,7 +806,7 @@
}
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -629,7 +846,7 @@
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in r0 to make the comparison fail.
- if (cc == lt || cc == le) {
+ if (cond == lt || cond == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -641,7 +858,8 @@
// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
+ Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -649,7 +867,7 @@
Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cc == eq) {
+ if (cond == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
__ cmp(rhs_mantissa, Operand(lhs_mantissa));
@@ -835,7 +1053,7 @@
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
- __ BranchOnSmi(object, &is_smi);
+ __ JumpIfSmi(object, &is_smi);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
@@ -861,7 +1079,7 @@
Register probe = mask;
__ ldr(probe,
FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ BranchOnSmi(probe, not_found);
+ __ JumpIfSmi(probe, not_found);
__ sub(scratch2, object, Operand(kHeapObjectTag));
__ vldr(d0, scratch2, HeapNumber::kValueOffset);
__ sub(probe, probe, Operand(kHeapObjectTag));
@@ -938,7 +1156,7 @@
} else if (FLAG_debug_code) {
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
- __ Assert(nz, "CompareStub: unexpected smi operands.");
+ __ Assert(ne, "CompareStub: unexpected smi operands.");
}
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -1080,7 +1298,7 @@
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result;
Label not_heap_number;
- Register scratch = r7;
+ Register scratch = r9.is(tos_) ? r7 : r9;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
@@ -1375,7 +1593,7 @@
__ sub(r0, r5, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
+ __ Ret();
} else {
// If we did not inline the operation, then the arguments are in:
// r0: Left value (least significant part of mantissa).
@@ -1505,7 +1723,7 @@
__ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+ __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
__ jmp(&done_checking_lhs);
__ bind(&lhs_is_smi);
__ mov(r3, Operand(lhs, ASR, 1));
@@ -1516,7 +1734,7 @@
__ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
__ cmp(r4, heap_number_map);
__ b(ne, &slow);
- __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+ __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
__ jmp(&done_checking_rhs);
__ bind(&rhs_is_smi);
__ mov(r2, Operand(rhs, ASR, 1));
@@ -1960,7 +2178,7 @@
Label not_smi;
if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
Label lhs_is_unsuitable;
- __ BranchOnNotSmi(lhs, &not_smi);
+ __ JumpIfNotSmi(lhs, &not_smi);
if (IsPowerOf2(constant_rhs_)) {
if (op_ == Token::MOD) {
__ and_(rhs,
@@ -2207,8 +2425,638 @@
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
+ TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(r2, r1, r0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
UNIMPLEMENTED();
- return Handle<Code>::null();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case TRBinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRBinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRBinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case TRBinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRBinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case TRBinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingBinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRBinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
+ MacroAssembler* masm) {
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ ASSERT(right.is(r0));
+ STATIC_ASSERT(kSmiTag == 0);
+
+ Label not_smi_result;
+ switch (op_) {
+ case Token::ADD:
+ __ add(right, left, Operand(right), SetCC); // Add optimistically.
+ __ Ret(vc);
+ __ sub(right, right, Operand(left)); // Revert optimistic add.
+ break;
+ case Token::SUB:
+ __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
+ __ Ret(vc);
+ __ sub(right, left, Operand(right)); // Revert optimistic subtract.
+ break;
+ case Token::MUL:
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(ip, right);
+ // Do multiplication
+ // scratch1 = lower 32 bits of ip * left.
+ // scratch2 = higher 32 bits of ip * left.
+ __ smull(scratch1, scratch2, left, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ mov(ip, Operand(scratch1, ASR, 31));
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &not_smi_result);
+ // Go slow on zero result to handle -0.
+ __ tst(scratch1, Operand(scratch1));
+ __ mov(right, Operand(scratch1), LeaveCC, ne);
+ __ Ret(ne);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ add(scratch2, right, Operand(left), SetCC);
+ __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return smi 0 if the non-zero one was positive.
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ break;
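// Reviewer note: hedged C++ rendering of the smull overflow test in the
// MUL case above. One operand is untagged first, so the 64-bit product is
// already a tagged smi; it fits iff the high word equals the sign
// extension of the low word (the "higher 33 bits identical" check).
#include <cstdint>
static inline bool SmiMulSketch(int32_t left_tagged, int32_t right_tagged,
                                int32_t* result_tagged) {
  int64_t product =
      static_cast<int64_t>(left_tagged) * (right_tagged >> 1);  // untag one
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return false;  // result overflows the smi range
  if (lo == 0 && (left_tagged | right_tagged) < 0) return false;  // needs -0
  *result_tagged = lo;
  return true;
}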
+ case Token::DIV:
+ // Check for power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+ // Check for positive and no remainder (scratch1 contains right - 1).
+ __ orr(scratch2, scratch1, Operand(0x80000000u));
+ __ tst(left, scratch2);
+ __ b(ne, &not_smi_result);
+
+ // Perform division by shifting.
+ __ CountLeadingZeros(scratch1, scratch1, scratch2);
+ __ rsb(scratch1, scratch1, Operand(31));
+ __ mov(right, Operand(left, LSR, scratch1));
+ __ Ret();
+ break;
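// Reviewer note: hedged untagged sketch of the DIV fast path above. In the
// emitted code both operands are still tagged (scaled by 2), which is why
// 31 - clz(right - 1) yields the shift amount there; on untagged values
// the same idea is 31 - clz(right) == log2(right).
#include <cstdint>
static inline bool SmiDivPow2Sketch(uint32_t left, uint32_t right,
                                    uint32_t* result) {
  if (right == 0 || (right & (right - 1)) != 0) return false;  // not 2^k
  if (left & ((right - 1) | 0x80000000u)) return false;  // neg. or remainder
  int shift = 31 - __builtin_clz(right);  // log2(right); GCC/Clang builtin
  *result = left >> shift;  // exact division by 2^shift
  return true;
}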
+ case Token::MOD:
+ // Check for two positive smis.
+ __ orr(scratch1, left, Operand(right));
+ __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &not_smi_result);
+
+ // Check for power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+
+ // Perform modulus by masking.
+ __ and_(right, left, Operand(scratch1));
+ __ Ret();
+ break;
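// Reviewer note: hedged sketch of the MOD fast path above: for
// non-negative left and a power-of-two right, left % right equals
// left & (right - 1), and the identity survives smi tagging because both
// operands carry the same factor-of-two scaling.
#include <cstdint>
static inline uint32_t SmiModPow2Sketch(uint32_t left, uint32_t right) {
  // Caller has already checked: right is a nonzero power of two, left >= 0.
  return left & (right - 1);
}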
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ mov(right, Operand(left, ASR, scratch1));
+ // Smi tag result.
+ __ bic(right, right, Operand(kSmiTagMask));
+ __ Ret();
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ tst(scratch1, Operand(0xc0000000));
+ __ b(ne, &not_smi_result);
+ // Smi tag result.
+ __ SmiTag(right, scratch1);
+ __ Ret();
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+ // Check that the signed result fits in a Smi.
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+ __ b(mi, &not_smi_result);
+ __ SmiTag(right, scratch1);
+ __ Ret();
+ break;
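// Reviewer note: the add(0x40000000, SetCC) / b(mi) sequence in the SHL
// case above is a branch-free smi range check; a hedged sketch of the
// predicate: a 32-bit value fits the 31-bit smi payload iff it lies in
// [-2^30, 2^30 - 1], i.e. iff value + 2^30 leaves the sign bit clear.
#include <cstdint>
static inline bool FitsSmiSketch(int32_t value) {
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}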
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+ // depending on whether VFP3 is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = r5;
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Using VFP registers:
+ // d6: Left value
+ // d7: Right value
+ CpuFeatures::Scope scope(VFP3);
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ sub(r0, result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from r5.
+ __ sub(scratch1, result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+#endif
+ // Place result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(result));
+ __ pop(pc);
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(r3, left);
+ __ SmiUntag(r2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in r2 and left in r3.
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ left,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ FloatingPointHelper::LoadNumberAsInteger(masm,
+ right,
+ r2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ d0,
+ not_numbers);
+ }
+
+ Label result_not_a_smi;
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of
+ // writing the register as an unsigned int, so we go to the slow case
+ // if we hit this case.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ __ b(mi, not_numbers);
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check that the *signed* result fits in a smi.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ SmiTag(r0, r2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ __ AllocateHeapNumber(
+ r5, scratch1, scratch2, heap_number_map, gc_required);
+
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
+ // mentioned above SHR needs to always produce a positive result.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful this return
+// is generated. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Label not_smis;
+
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Perform combined smi check on both operands.
+ __ orr(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(scratch1, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+
+ // If the smi-smi operation results in a smi, a return is generated.
+ GenerateSmiSmiOperation(masm);
+
+ // If heap number results are possible generate the result in an allocated
+ // heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, NULL, gc_required);
+ }
+ __ bind(&not_smis);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smis, call_runtime;
+
+ if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+ result_type_ == TRBinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ Label not_numbers, call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
+
+ GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
+
+ __ bind(&not_numbers);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+
+ // Try to add strings before calling runtime.
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ GenericBinaryOpStub stub(op_, mode_, r1, r0);
+ __ TailCallStub(&stub);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+
+ Register left = r1;
+ Register right = r0;
+ Label call_runtime;
+
+ // Check if first argument is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // First argument is a string, test second.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
+ // The code below will clobber result if allocation fails. To keep both
+ // arguments intact for the runtime call, result cannot be one of them.
+ ASSERT(!result.is(r0) && !result.is(r1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ b(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, Operand(overwritable_operand));
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(r1, r0);
}
@@ -2220,7 +3068,7 @@
if (CpuFeatures::IsSupported(VFP3)) {
// Load argument and check if it is a smi.
- __ BranchOnNotSmi(r0, &input_not_smi);
+ __ JumpIfNotSmi(r0, &input_not_smi);
CpuFeatures::Scope scope(VFP3);
// Input is a smi. Convert to double and load the low and high words
@@ -2374,7 +3222,7 @@
} else if (op_ == Token::BIT_NOT) {
if (include_smi_code_) {
Label non_smi;
- __ BranchOnNotSmi(r0, &non_smi);
+ __ JumpIfNotSmi(r0, &non_smi);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
@@ -2392,7 +3240,7 @@
__ b(ne, &slow);
// Convert the heap number is r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, &slow);
+ __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.
@@ -2558,8 +3406,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate,
- int frame_alignment_skew) {
+ bool always_allocate) {
// r0: result parameter for PerformGC, if any
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -2585,15 +3432,14 @@
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
+#if defined(V8_HOST_ARCH_ARM)
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
-#if defined(V8_HOST_ARCH_ARM)
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
ASSERT(IsPowerOf2(frame_alignment));
- __ sub(r2, sp, Operand(frame_alignment_skew));
- __ tst(r2, Operand(frame_alignment_mask));
+ __ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
__ stop("Unexpected alignment");
@@ -2602,35 +3448,20 @@
}
#endif
- // Just before the call (jump) below lr is pushed, so the actual alignment is
- // adding one to the current skew.
- int alignment_before_call =
- (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
- if (alignment_before_call > 0) {
- // Push until the alignment before the call is met.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- for (int i = alignment_before_call;
- (i & frame_alignment_mask) != 0;
- i += kPointerSize) {
- __ push(r2);
- }
- }
-
// TODO(1242173): To let the GC traverse the return address of the exit
// frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
+ // we store it on the stack to be able to find it again, but we never
// restore from it in case of changes, which makes it impossible to
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
- masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
- masm->push(lr);
- masm->Jump(r5);
- // Restore sp back to before aligning the stack.
- if (alignment_before_call > 0) {
- __ add(sp, sp, Operand(alignment_before_call));
- }
+ // Compute the return address in lr to return to after the jump below. The
+ // pc is already at '+ 8' from the current instruction, but the return is
+ // after three instructions, so add another 4 to pc to get the return
+ // address.
+ masm->add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ masm->Jump(r5);
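// Reviewer note: hedged trace of the lr computation above, assuming the
// three instructions are emitted at consecutive word addresses A .. A+8:
//   A + 0:  add lr, pc, #4    // pc reads as A + 8 (ARM pipeline), lr = A + 12
//   A + 4:  str lr, [sp, #0]  // make the return address visible to the GC
//   A + 8:  jump via r5       // the actual call into the builtin
//   A + 12: ...               // execution resumes here after the call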
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -2701,9 +3532,17 @@
// this by performing a garbage collection and retrying the
// builtin once.
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r6, r6, Operand(kPointerSize));
+
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(save_doubles_);
+ // Setup argc and the builtin function in callee-saved registers.
+ __ mov(r4, Operand(r0));
+ __ mov(r5, Operand(r1));
+
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
// r6: pointer to first argument (C callee-saved)
@@ -2718,8 +3557,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
false,
- false,
- -kPointerSize);
+ false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
@@ -2727,8 +3565,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- false,
- 0);
+ false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
@@ -2738,8 +3575,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- true,
- kPointerSize);
+ true);
__ bind(&throw_out_of_memory_exception);
GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
@@ -2890,79 +3726,144 @@
}
-// Uses registers r0 to r4. Expected input is
-// object in r0 (or at sp+1*kPointerSize) and function in
-// r1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers r0 to r4.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r0 or at sp + 1 * kPointerSize.
+// * function: r1 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register r4.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
// Fixed register usage throughout the stub:
const Register object = r0; // Object (lhs).
- const Register map = r3; // Map of the object.
+ Register map = r3; // Map of the object.
const Register function = r1; // Function (rhs).
const Register prototype = r4; // Prototype of the function.
+ const Register inline_site = r9;
const Register scratch = r2;
+
+ const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+
Label slow, loop, is_instance, is_not_instance, not_js_object;
+
if (!HasArgsInRegisters()) {
__ ldr(object, MemOperand(sp, 1 * kPointerSize));
__ ldr(function, MemOperand(sp, 0));
}
// Check that the left hand is a JS object and load map.
- __ BranchOnSmi(object, &not_js_object);
+ __ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(function, ip);
- __ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(map, ip);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+ __ cmp(function, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+ __ cmp(map, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
- __ bind(&miss);
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ BranchOnSmi(prototype, &slow);
+ __ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in r4 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+ __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+ __ sub(inline_site, lr, scratch);
+ // Get the map location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(map, MemOperand(scratch));
+ }
// Register mapping: r3 is object map and r4 is function prototype.
// Get prototype of object into r2.
__ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // We don't need map any more. Use it as a scratch register.
+ Register scratch2 = map;
+ map = no_reg;
+
// Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmp(scratch, Operand(prototype));
__ b(eq, &is_instance);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(scratch, ip);
+ __ cmp(scratch, scratch2);
__ b(eq, &is_not_instance);
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
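// Reviewer note: the emitted loop above is the core of JS instanceof; a
// hedged C++ paraphrase, collapsing the object -> map -> prototype
// indirection into a single pointer and using nullptr for V8's null:
struct ProtoSketch { ProtoSketch* prototype; };
static inline bool IsInstanceSketch(ProtoSketch* object_prototype,
                                    ProtoSketch* function_prototype) {
  for (ProtoSketch* p = object_prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;  // found function.prototype
  }
  return false;  // fell off the chain at null: not an instance
}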
__ bind(&is_instance);
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return true.
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(r0, MemOperand(scratch));
+
+ if (!ReturnTrueFalseObject()) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ }
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&is_not_instance);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return false.
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(r0, MemOperand(scratch));
+
+ if (!ReturnTrueFalseObject()) {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ }
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
- __ BranchOnSmi(function, &slow);
- __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE);
+ __ JumpIfSmi(function, &slow);
+ __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow);
// Null is not instance of anything.
@@ -2973,7 +3874,7 @@
__ bind(&object_not_null);
// Smi values are not instances of anything.
- __ BranchOnNotSmi(object, &object_not_null_or_smi);
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -2985,13 +3886,30 @@
// Slow-case. Tail call builtin.
__ bind(&slow);
- if (HasArgsInRegisters()) {
- __ Push(r0, r1);
- }
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(r0, r1);
+ }
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+ } else {
+ __ EnterInternalFrame();
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+ __ LeaveInternalFrame();
+ __ cmp(r0, Operand(0));
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+ }
}
+Register InstanceofStub::left() { return r0; }
+
+
+Register InstanceofStub::right() { return r1; }
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -3000,7 +3918,7 @@
// Check that the key is a smi.
Label slow;
- __ BranchOnNotSmi(r1, &slow);
+ __ JumpIfNotSmi(r1, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -3204,7 +4122,7 @@
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
}
@@ -3307,7 +4225,7 @@
// Is first part a flat string?
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(nz, &runtime);
+ __ b(ne, &runtime);
__ bind(&seq_string);
// subject: Subject string
@@ -3582,7 +4500,7 @@
__ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
// Check if receiver is a smi (which is a number value).
- __ BranchOnSmi(r1, &receiver_is_value);
+ __ JumpIfSmi(r1, &receiver_is_value);
// Check if the receiver is a valid JS object.
__ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
@@ -3605,7 +4523,7 @@
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ BranchOnSmi(r1, &slow);
+ __ JumpIfSmi(r1, &slow);
// Get the map of the function object.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &slow);
@@ -3703,14 +4621,13 @@
// StringCharCodeAtGenerator
-
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object_, receiver_not_string_);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
@@ -3720,7 +4637,7 @@
__ b(ne, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index_, &index_not_smi_);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
@@ -3756,13 +4673,13 @@
// If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
- __ b(nz, &call_runtime_);
+ __ b(ne, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ tst(result_, Operand(kStringEncodingMask));
- __ b(nz, &ascii_string);
+ __ b(ne, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register. We can
@@ -3817,7 +4734,7 @@
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -3847,7 +4764,7 @@
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case_);
+ __ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ascii char code.
@@ -4294,7 +5211,7 @@
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, nz);
+ __ mov(hash, Operand(27), LeaveCC, ne);
}
@@ -4862,6 +5779,56 @@
}
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+ // Expects two arguments (object, index) on the stack:
+ // lr: return address
+ // sp[0]: index
+ // sp[4]: object
+ Register object = r1;
+ Register index = r0;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ // Get object and index from the stack.
+ __ pop(index);
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result, Operand(Smi::FromInt(0)));
+ __ b(&done);
+
+ StubRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&done);
+ __ Ret();
+}
+
+
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
@@ -4873,10 +5840,9 @@
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
- __ sub(r1, r1, r0, SetCC);
- // Correct sign of result in case of overflow.
- __ rsb(r1, r1, Operand(0), SetCC, vs);
- __ mov(r0, r1);
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(r1);
+ __ sub(r0, r1, SmiUntagOperand(r0));
}
__ Ret();
@@ -4978,6 +5944,90 @@
}
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ __ ldr(pc, MemOperand(sp, 0));
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ApiFunction *function) {
+ __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET));
+ // Push return address (accessible to GC through exit frame pc).
+ __ mov(r2,
+ Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ str(pc, MemOperand(sp, 0));
+ __ Jump(r2); // Call the api function.
+}
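// Reviewer note: the str(pc) above relies on the same pipeline offset as
// the CEntryStub change earlier in this patch: pc reads as the str's own
// address + 8, which is the instruction right after the Jump, so the
// stored word doubles as the return address that
// DirectCEntryStub::Generate reloads into pc.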
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - set to be the receiver's elements on exit.
+ //
+ // elements_map - set to be the map of the receiver's elements
+ // on exit.
+ //
+ // result - holds the result of the pixel array load on exit,
+ // tagged as a smi if successful.
+ //
+ // Scratch registers:
+ //
+  //  scratch1  - used as a scratch register in the map check; if the
+  //              map check is successful, it successively holds the
+  //              length of the pixel array, the pointer to the external
+  //              elements, and the untagged result.
+ //
+  //  scratch2  - holds the untagged key.
+
+  // Some callers have already verified that the key is a smi. key_not_smi is
+  // set to NULL as a sentinel for that case. Otherwise, an explicit check
+  // that the key is a smi must be generated.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiUntag(scratch2, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
+ not_pixel_array, true);
+
+ // Key must be in range of the pixel array.
+ __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
+ __ cmp(scratch2, scratch1);
+  __ b(hs, out_of_range);  // Unsigned check handles negative keys.
+
+ // Perform the indexed load and tag the result as a smi.
+ __ ldr(scratch1,
+ FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+ __ ldrb(scratch1, MemOperand(scratch1, scratch2));
+ __ SmiTag(r0, scratch1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
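GenerateFastPixelArrayLoad above folds its guards into straight-line code; once the key is untagged, a single unsigned compare rejects both negative and too-large indices. A plain C++ analogue of the emitted checks (illustrative names, not V8 API; assumes one-bit smi tagging):

    #include <cstdint>

    // Mirrors the stub: reject non-smi keys, untag, bounds-check with one
    // unsigned compare, then load the byte and re-tag it as a smi.
    bool FastPixelLoad(const uint8_t* elements, uint32_t length,
                       int32_t tagged_key, int32_t* tagged_result) {
      if (tagged_key & 1) return false;  // key is not a smi
      int32_t key = tagged_key >> 1;     // SmiUntag
      // A negative key becomes a huge unsigned value, so this one compare is
      // the 'b(hs, out_of_range)' above: it also catches negative indices.
      if (static_cast<uint32_t>(key) >= length) return false;
      *tagged_result = static_cast<int32_t>(elements[key]) << 1;  // SmiTag
      return true;
    }
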
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 9fa8687..bf7d635 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -218,6 +218,120 @@
};
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+ TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(TRBinaryOpIC::UNINITIALIZED),
+ result_type_(TRBinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ TypeRecordingBinaryOpStub(
+ int key,
+ TRBinaryOpIC::TypeInfo operands_type,
+ TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_vfp3_(VFP3Bits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_vfp3_;
+
+ // Operand type information determined at runtime.
+ TRBinaryOpIC::TypeInfo operands_type_;
+ TRBinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRBinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class VFP3Bits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return TypeRecordingBinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | VFP3Bits::encode(use_vfp3_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRBinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_binary_op_type(operands_type_);
+ code->set_type_recording_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
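The 'RRRTTTVOOOOOOOMM' layout above is the five BitFields laid end to end: 2 mode bits, 7 opcode bits, 1 VFP3 bit, then two 3-bit type-info fields. A self-contained sketch of that packing, using assumed BitField<T, shift, size> semantics:

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for V8's BitField template (assumed semantics).
    template <typename T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> shift) & ((1u << size) - 1));
      }
    };

    int main() {
      typedef BitField<int, 0, 2> ModeBits;   // MM
      typedef BitField<int, 2, 7> OpBits;     // OOOOOOO
      typedef BitField<bool, 9, 1> VFP3Bits;  // V
      uint32_t key =
          OpBits::encode(42) | ModeBits::encode(1) | VFP3Bits::encode(true);
      assert(OpBits::decode(key) == 42);
      assert(ModeBits::decode(key) == 1);
      assert(VFP3Bits::decode(key));
      return 0;
    }
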
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -457,6 +571,45 @@
};
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code that called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "DirectCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// then the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// key, so that the smi check is not generated. If key is not a valid index
+// within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 264498d..81ed2d0 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -39,7 +39,7 @@
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4a982f6..c827110 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1110,7 +1110,7 @@
Register int32 = r2;
// Not a 32-bit signed int; fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+ __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
// tos_register_ (r0 or r1): Original heap number.
// int32: signed 32bits int.
@@ -1589,7 +1589,7 @@
}
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(Condition cond,
Expression* left,
Expression* right,
bool strict) {
@@ -1603,7 +1603,7 @@
// result : cc register
// Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == eq);
+ ASSERT(!strict || cond == eq);
Register lhs;
Register rhs;
@@ -1614,8 +1614,8 @@
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == gt || cc == le) {
- cc = ReverseCondition(cc);
+ if (cond == gt || cond == le) {
+ cond = ReverseCondition(cond);
lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
- CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
+ CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
@@ -1667,7 +1667,7 @@
__ cmp(lhs, Operand(rhs));
exit.Bind();
- cc_reg_ = cc;
+ cc_reg_ = cond;
}
@@ -1762,7 +1762,7 @@
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
- __ BranchOnSmi(receiver_reg, &build_args);
+ __ JumpIfSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -1774,7 +1774,7 @@
// Check that applicand.apply is Function.prototype.apply.
__ ldr(r0, MemOperand(sp, kPointerSize));
- __ BranchOnSmi(r0, &build_args);
+ __ JumpIfSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
@@ -1785,7 +1785,7 @@
// Check that applicand is a function.
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ BranchOnSmi(r1, &build_args);
+ __ JumpIfSmi(r1, &build_args);
__ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
@@ -1885,8 +1885,8 @@
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
ASSERT(has_cc());
- Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cc);
+ Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ target->Branch(cond);
cc_reg_ = al;
}
@@ -2192,15 +2192,10 @@
DeleteFrame();
#ifdef DEBUG
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. If the sp_delts above cannot be encoded in
- // the add instruction the add will generate two instructions.
- int return_sequence_length =
- masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions ||
- return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions + 1);
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -4177,7 +4172,10 @@
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump();
slow.Bind();
@@ -4197,8 +4195,11 @@
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
// Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
@@ -4618,8 +4619,8 @@
ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_);
- __ BranchOnNotSmi(exponent, &exponent_nonsmi);
- __ BranchOnNotSmi(base, &base_nonsmi);
+ __ JumpIfNotSmi(exponent, &exponent_nonsmi);
+ __ JumpIfNotSmi(base, &base_nonsmi);
heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -4698,12 +4699,15 @@
runtime.entry_label(),
AVOID_NANS_AND_INFINITIES);
+ // Convert -0 into +0 by adding +0.
+ __ vmov(d2, 0.0);
+ __ vadd(d0, d2, d0);
// Load 1.0 into d2.
__ vmov(d2, 1.0);
- // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
- __ vdiv(d0, d2, d0);
+ // Calculate the reciprocal of the square root.
__ vsqrt(d0, d0);
+ __ vdiv(d0, d2, d0);
__ b(&allocate_return);
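Two IEEE-754 details motivate this hunk: -0.0 + +0.0 is +0.0, so the vadd canonicalizes a negative-zero base, and sqrt(-0.0) is -0.0, so without the add the reciprocal path would produce -inf where the spec wants a positive result. A quick check of the underlying facts (standard C++, independent of V8):

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::signbit(-0.0));
      assert(!std::signbit(-0.0 + 0.0));      // the canonicalizing vadd
      assert(std::signbit(std::sqrt(-0.0)));  // sqrt preserves the zero's sign
      assert(std::pow(-0.0, 0.5) == 0.0);     // the result the stub must match
      return 0;
    }
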
@@ -4717,6 +4721,9 @@
scratch1, scratch2, heap_number_map, s0,
runtime.entry_label(),
AVOID_NANS_AND_INFINITIES);
+ // Convert -0 into +0 by adding +0.
+ __ vmov(d2, 0.0);
+ __ vadd(d0, d2, d0);
__ vsqrt(d0, d0);
__ bind(&allocate_return);
@@ -5572,7 +5579,7 @@
deferred->Branch(lt);
__ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);
// Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5596,7 @@
__ mov(tmp2, index1);
__ orr(tmp2, tmp2, index2);
__ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);
// Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
@@ -5849,14 +5856,10 @@
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // lookup the context holding the named variable
+ // Delete from the context holding the named variable.
frame_->EmitPush(cp);
frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kLookupContext, 2);
- // r0: context
- frame_->EmitPush(r0);
- frame_->EmitPush(Operand(variable->name()));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->EmitPush(r0);
} else {
@@ -6923,7 +6926,7 @@
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- frame()->CallStoreIC(name, is_contextual);
+ frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
} else {
// Inline the in-object property case.
JumpTarget slow, done;
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 589e704..8f46256 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -287,6 +287,7 @@
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 3df7b4e..bf9da23 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -32,12 +32,10 @@
#include "constants-arm.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
-namespace v8i = v8::internal;
-
-double Instr::DoubleImmedVmov() const {
+double Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -149,6 +147,6 @@
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 36f6283..7ac38ed 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -86,8 +86,8 @@
#define USE_BLX 1
#endif
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
@@ -102,6 +102,9 @@
static const int kPCRegister = 15;
static const int kNoRegister = -1;
+// -----------------------------------------------------------------------------
+// Conditions.
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
@@ -111,93 +114,262 @@
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-typedef unsigned char byte;
-
// Values for the condition field as defined in section A3.2
enum Condition {
- no_condition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
- GE = 10, // signed greater than or equal
- LT = 11, // signed less than
- GT = 12, // signed greater than
- LE = 13, // signed less than or equal
- AL = 14, // always (unconditional)
- special_condition = 15, // special condition (refer to section A3.2.1)
- max_condition = 16
+ kNoCondition = -1,
+
+ eq = 0 << 28, // Z set Equal.
+ ne = 1 << 28, // Z clear Not equal.
+ cs = 2 << 28, // C set Unsigned higher or same.
+ cc = 3 << 28, // C clear Unsigned lower.
+ mi = 4 << 28, // N set Negative.
+ pl = 5 << 28, // N clear Positive or zero.
+ vs = 6 << 28, // V set Overflow.
+ vc = 7 << 28, // V clear No overflow.
+ hi = 8 << 28, // C set, Z clear Unsigned higher.
+ ls = 9 << 28, // C clear or Z set Unsigned lower or same.
+ ge = 10 << 28, // N == V Greater or equal.
+ lt = 11 << 28, // N != V Less than.
+ gt = 12 << 28, // Z clear, N == V Greater than.
+  le = 13 << 28,                 //  Z set or N != V   Less than or equal.
+ al = 14 << 28, // Always.
+
+ kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
+ kNumberOfConditions = 16,
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc // C clear Unsigned lower.
};
+inline Condition NegateCondition(Condition cond) {
+ ASSERT(cond != al);
+ return static_cast<Condition>(cond ^ ne);
+}
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cond;
+ };
+}
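Because each condition now lives in bits 31-28 and the pairs that negate each other (eq/ne, cs/cc, ge/lt, gt/le, ...) differ only in the lowest encoded bit, negation is a single XOR with ne. A standalone check of that invariant, with values copied from the enum above:

    #include <cassert>
    #include <cstdint>

    enum Condition : uint32_t {
      eq = 0u << 28, ne = 1u << 28, cs = 2u << 28, cc = 3u << 28,
      ge = 10u << 28, lt = 11u << 28, gt = 12u << 28, le = 13u << 28
    };

    Condition Negate(Condition cond) {
      return static_cast<Condition>(cond ^ ne);  // flip bit 28
    }

    int main() {
      assert(Negate(eq) == ne);
      assert(Negate(cs) == cc);
      assert(Negate(ge) == lt);  // 10 ^ 1 == 11
      assert(Negate(gt) == le);  // 12 ^ 1 == 13
      return 0;
    }
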
+
+
+// -----------------------------------------------------------------------------
+// Instruction encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from ordinary 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
- no_operand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
- CMP = 10, // Compare
- CMN = 11, // Compare Negated
- ORR = 12, // Logical (inclusive) OR
- MOV = 13, // Move
- BIC = 14, // Bit Clear
- MVN = 15, // Move Not
- max_operand = 16
+ AND = 0 << 21, // Logical AND.
+ EOR = 1 << 21, // Logical Exclusive OR.
+ SUB = 2 << 21, // Subtract.
+ RSB = 3 << 21, // Reverse Subtract.
+ ADD = 4 << 21, // Add.
+ ADC = 5 << 21, // Add with Carry.
+ SBC = 6 << 21, // Subtract with Carry.
+ RSC = 7 << 21, // Reverse Subtract with Carry.
+ TST = 8 << 21, // Test.
+ TEQ = 9 << 21, // Test Equivalence.
+ CMP = 10 << 21, // Compare.
+ CMN = 11 << 21, // Compare Negated.
+ ORR = 12 << 21, // Logical (inclusive) OR.
+ MOV = 13 << 21, // Move.
+ BIC = 14 << 21, // Bit Clear.
+ MVN = 15 << 21 // Move Not.
};
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
- BX = 1,
- BXJ = 2,
- BLX = 3,
- BKPT = 7,
+ BX = 1 << 4,
+ BXJ = 2 << 4,
+ BLX = 3 << 4,
+ BKPT = 7 << 4,
// With bits 22-21 11.
- CLZ = 1
+ CLZ = 1 << 4
+};
+
+
+// Instruction encoding bits and masks.
+enum {
+ H = 1 << 5, // Halfword (or byte).
+ S6 = 1 << 6, // Signed (or unsigned).
+ L = 1 << 20, // Load (or store).
+ S = 1 << 20, // Set condition code (or leave unchanged).
+ W = 1 << 21, // Writeback base register (or leave unchanged).
+ A = 1 << 21, // Accumulate in multiply instruction (or not).
+ B = 1 << 22, // Unsigned byte (or word).
+ N = 1 << 22, // Long (or short).
+ U = 1 << 23, // Positive (or negative) offset/index.
+ P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
+ I = 1 << 25, // Immediate shifter operand (or not).
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+
+ // Instruction bit masks.
+ kCondMask = 15 << 28,
+ kALUMask = 0x6f << 21,
+ kRdMask = 15 << 12, // In str instruction.
+ kCoprocessorMask = 15 << 8,
+ kOpCodeMask = 15 << 21, // In data-processing instructions.
+ kImm24Mask = (1 << 24) - 1,
+ kOff12Mask = (1 << 12) - 1
+};
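With the masks in place, field extraction is mask-and-shift. Using the example quoted further down this file, 'addgt r0, r1, r2' encodes as 0xC0810002, and the masks take it apart like so (a sketch with the constants restated for a standalone build):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kCondMask   = 15u << 28;
      const uint32_t kOpCodeMask = 15u << 21;
      const uint32_t kRdMask     = 15u << 12;

      const uint32_t instr = 0xC0810002;          // addgt r0, r1, r2
      assert((instr & kCondMask) >> 28 == 12u);   // condition 12 -> gt
      assert((instr & kOpCodeMask) >> 21 == 4u);  // opcode 4 -> ADD
      assert((instr & kRdMask) >> 12 == 0u);      // Rd -> r0
      return 0;
    }
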
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+
+// Status register selection.
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
};
// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum Shift {
- no_shift = -1,
- LSL = 0, // Logical shift left
- LSR = 1, // Logical shift right
- ASR = 2, // Arithmetic shift right
- ROR = 3, // Rotate right
- max_shift = 4
+enum ShiftOp {
+ LSL = 0 << 5, // Logical shift left.
+ LSR = 1 << 5, // Logical shift right.
+ ASR = 2 << 5, // Arithmetic shift right.
+ ROR = 3 << 5, // Rotate right.
+
+ // RRX is encoded as ROR with shift_imm == 0.
+ // Use a special code to make the distinction. The RRX ShiftOp is only used
+ // as an argument, and will never actually be encoded. The Assembler will
+ // detect it and emit the correct ROR shift operand with shift_imm == 0.
+ RRX = -1,
+ kNumberOfShifts = 4
};
+// Status register fields.
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values).
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode.
+enum AddrMode {
+ // Bit encoding P U W.
+ Offset = (8|4|0) << 21, // Offset (without writeback to base).
+ PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
+ PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
+ NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
+ NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
+ NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+ // Bit encoding P U W .
+ da = (0|0|0) << 21, // Decrement after.
+ ia = (0|4|0) << 21, // Increment after.
+ db = (8|0|0) << 21, // Decrement before.
+ ib = (8|4|0) << 21, // Increment before.
+ da_w = (0|0|1) << 21, // Decrement after with writeback to base.
+ ia_w = (0|4|1) << 21, // Increment after with writeback to base.
+ db_w = (8|0|1) << 21, // Decrement before with writeback to base.
+ ib_w = (8|4|1) << 21, // Increment before with writeback to base.
+
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0|0|0) << 21, // Decrement after.
+ ia_x = (0|4|0) << 21, // Increment after.
+ db_x = (8|0|0) << 21, // Decrement before.
+ ib_x = (8|4|0) << 21 // Increment before.
+};
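The (P|U|W) triples map onto familiar mnemonics: ARM push is stmdb sp! (decrement before, writeback) and pop is ldmia sp! (increment after, writeback). A quick consistency check against the P, U and W bits defined earlier (constants restated so the sketch builds standalone):

    #include <cassert>

    int main() {
      const unsigned P = 1u << 24, U = 1u << 23, W = 1u << 21;
      const unsigned db_w = (8u | 0u | 1u) << 21;  // decrement before, writeback
      const unsigned ia_w = (0u | 4u | 1u) << 21;  // increment after, writeback
      assert(db_w == (P | W));  // stmdb sp!, {...} -- push
      assert(ia_w == (U | W));  // ldmia sp!, {...} -- pop
      return 0;
    }
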
+
+
+// Coprocessor load/store operand size.
+enum LFlag {
+ Long = 1 << 22, // Long load/store coprocessor.
+ Short = 0 << 22 // Short load/store coprocessor.
+};
+
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
// Special Software Interrupt codes when used in the presence of the ARM
// simulator.
// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
// standard SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
enum SoftwareInterruptCodes {
// transition to C code
- call_rt_redirected = 0x10,
+  kCallRtRedirected = 0x10,
// break point
- break_point = 0x20,
+  kBreakpoint = 0x20,
// stop
- stop = 1 << 23
+ kStopCode = 1 << 23
};
-static const int32_t kStopCodeMask = stop - 1;
-static const uint32_t kMaxStopCode = stop - 1;
+static const uint32_t kStopCodeMask = kStopCode - 1;
+static const uint32_t kMaxStopCode = kStopCode - 1;
+static const int32_t kDefaultStopCode = -1;
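Bit 23 partitions the 24-bit svc immediate: any immediate at or above kStopCode is a simulator stop, and the low 23 bits carry the stop code, which is exactly what the IsStop() accessor added below tests. The arithmetic, spelled out:

    #include <cassert>

    int main() {
      const unsigned kStopCode = 1u << 23;
      const unsigned kStopCodeMask = kStopCode - 1;  // the low 23 bits
      unsigned svc_imm = kStopCode | 42u;            // a stop with code 42
      assert(svc_imm >= kStopCode);                  // IsStop()'s test
      assert((svc_imm & kStopCodeMask) == 42u);      // recovering the code
      return 0;
    }
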
// Type of VFP register. Determines register encoding.
@@ -206,30 +378,123 @@
kDoublePrecision = 1
};
-// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum FPSCRRoundingModes {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
+
+// VFP FPSCR constants.
+enum VFPConversionMode {
+ kFPSCRRounding = 0,
+ kDefaultRoundToZero = 1
};
-typedef int32_t instr_t;
+static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+static const uint32_t kVFPInvalidExceptionBit = 1;
+
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
+static const uint32_t kVFPZConditionFlagBit = 1 << 30;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-// The class Instr enables access to individual fields defined in the ARM
+// VFP rounding modes. See ARM DDI 0406B Page A2-29.
+enum VFPRoundingMode {
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+ kRoundToZero = RZ
+};
+
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-arm.cc, as they use named registers
+// and other constants.
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+extern const Instr kPopInstruction;
+
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+extern const Instr kPushRegPattern;
+
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+extern const Instr kPopRegPattern;
+
+// mov lr, pc
+extern const Instr kMovLrPc;
+// ldr rd, [pc, #offset]
+extern const Instr kLdrPCMask;
+extern const Instr kLdrPCPattern;
+// blxcc rm
+extern const Instr kBlxRegMask;
+
+extern const Instr kBlxRegPattern;
+
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
+
+// A mask for the Rd register for push, pop, ldr, str instructions.
+extern const Instr kLdrRegFpOffsetPattern;
+
+extern const Instr kStrRegFpOffsetPattern;
+
+extern const Instr kLdrRegFpNegOffsetPattern;
+
+extern const Instr kStrRegFpNegOffsetPattern;
+
+extern const Instr kLdrStrInstrTypeMask;
+extern const Instr kLdrStrInstrArgumentMask;
+extern const Instr kLdrStrOffsetMask;
+
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
//
// Example: Test whether the instruction at ptr does set the condition code
// bits.
//
// bool InstructionSetsConditionCodes(byte* ptr) {
-// Instr* instr = Instr::At(ptr);
-// int type = instr->TypeField();
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
-class Instr {
+class Instruction {
public:
enum {
kInstrSize = 4,
@@ -237,14 +502,24 @@
kPCReadOffset = 8
};
+ // Helper macro to define static accessors.
+  // We use the cast to char* trick to bypass C++'s strict-aliasing rules.
+ #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+ #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
// Get the raw instruction bits.
- inline instr_t InstructionBits() const {
- return *reinterpret_cast<const instr_t*>(this);
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
- inline void SetInstructionBits(instr_t value) {
- *reinterpret_cast<instr_t*>(this) = value;
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
@@ -252,93 +527,143 @@
return (InstructionBits() >> nr) & 1;
}
- // Read a bit field out of the instruction bits.
+ // Read a bit field's value out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
+ // Read a bit field out of the instruction bits.
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) {
+ return (instr >> nr) & 1;
+ }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
+
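The mask (2 << (hi - lo)) - 1 is a run of hi - lo + 1 one bits, so Bits() returns a field right-aligned while BitField() leaves it at its original position. Worked on the 'addgt r0, r1, r2' example from the comment below (standalone sketch):

    #include <cassert>
    #include <cstdint>

    static int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((2 << (hi - lo)) - 1);
    }

    int main() {
      const uint32_t instr = 0xC0810002;  // addgt r0, r1, r2
      assert(Bits(instr, 19, 16) == 1);   // Rn -> r1
      assert(Bits(instr, 15, 12) == 0);   // Rd -> r0
      assert(Bits(instr, 3, 0) == 2);     // Rm -> r2
      return 0;
    }
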
// Accessors for the different named fields used in the ARM encoding.
// The naming of these accessors corresponds to figure A3-1.
+ //
+  // Two kinds of accessors are declared:
+  //  - <Name>Field() will return the raw field, i.e. the field's bits at
+  //    their original place in the instruction encoding.
+  //    e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //    0xC0810002, ConditionField(instr) will return 0xC0000000.
+  //  - <Name>Value() will return the field value, shifted back to bit 0.
+  //    e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //    0xC0810002, ConditionValue(instr) will return 0xC.
+
+
// Generally applicable fields
- inline Condition ConditionField() const {
+ inline Condition ConditionValue() const {
return static_cast<Condition>(Bits(31, 28));
}
- inline int TypeField() const { return Bits(27, 25); }
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(BitField(31, 28));
+ }
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
- inline int RnField() const { return Bits(19, 16); }
- inline int RdField() const { return Bits(15, 12); }
+ inline int TypeValue() const { return Bits(27, 25); }
- inline int CoprocessorField() const { return Bits(11, 8); }
+ inline int RnValue() const { return Bits(19, 16); }
+ DECLARE_STATIC_ACCESSOR(RnValue);
+ inline int RdValue() const { return Bits(15, 12); }
+ DECLARE_STATIC_ACCESSOR(RdValue);
+
+ inline int CoprocessorValue() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
- inline int VnField() const { return Bits(19, 16); }
- inline int VmField() const { return Bits(3, 0); }
- inline int VdField() const { return Bits(15, 12); }
- inline int NField() const { return Bit(7); }
- inline int MField() const { return Bit(5); }
- inline int DField() const { return Bit(22); }
- inline int RtField() const { return Bits(15, 12); }
- inline int PField() const { return Bit(24); }
- inline int UField() const { return Bit(23); }
- inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
- inline int Opc2Field() const { return Bits(19, 16); }
- inline int Opc3Field() const { return Bits(7, 6); }
- inline int SzField() const { return Bit(8); }
- inline int VLField() const { return Bit(20); }
- inline int VCField() const { return Bit(8); }
- inline int VAField() const { return Bits(23, 21); }
- inline int VBField() const { return Bits(6, 5); }
- inline int VFPNRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 16, 7);
+ inline int VnValue() const { return Bits(19, 16); }
+ inline int VmValue() const { return Bits(3, 0); }
+ inline int VdValue() const { return Bits(15, 12); }
+ inline int NValue() const { return Bit(7); }
+ inline int MValue() const { return Bit(5); }
+ inline int DValue() const { return Bit(22); }
+ inline int RtValue() const { return Bits(15, 12); }
+ inline int PValue() const { return Bit(24); }
+ inline int UValue() const { return Bit(23); }
+ inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
+ inline int Opc2Value() const { return Bits(19, 16); }
+ inline int Opc3Value() const { return Bits(7, 6); }
+ inline int SzValue() const { return Bit(8); }
+ inline int VLValue() const { return Bit(20); }
+ inline int VCValue() const { return Bit(8); }
+ inline int VAValue() const { return Bits(23, 21); }
+ inline int VBValue() const { return Bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 16, 7);
}
- inline int VFPMRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 0, 5);
+ inline int VFPMRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 0, 5);
}
- inline int VFPDRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 12, 22);
+ inline int VFPDRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 12, 22);
}
// Fields used in Data processing instructions
- inline Opcode OpcodeField() const {
+ inline int OpcodeValue() const {
return static_cast<Opcode>(Bits(24, 21));
}
- inline int SField() const { return Bit(20); }
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(BitField(24, 21));
+ }
+ inline int SValue() const { return Bit(20); }
// with register
- inline int RmField() const { return Bits(3, 0); }
- inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
- inline int RegShiftField() const { return Bit(4); }
- inline int RsField() const { return Bits(11, 8); }
- inline int ShiftAmountField() const { return Bits(11, 7); }
+ inline int RmValue() const { return Bits(3, 0); }
+ DECLARE_STATIC_ACCESSOR(RmValue);
+ inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
+ inline ShiftOp ShiftField() const {
+ return static_cast<ShiftOp>(BitField(6, 5));
+ }
+ inline int RegShiftValue() const { return Bit(4); }
+ inline int RsValue() const { return Bits(11, 8); }
+ inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
- inline int RotateField() const { return Bits(11, 8); }
- inline int Immed8Field() const { return Bits(7, 0); }
- inline int Immed4Field() const { return Bits(19, 16); }
- inline int ImmedMovwMovtField() const {
- return Immed4Field() << 12 | Offset12Field(); }
+ inline int RotateValue() const { return Bits(11, 8); }
+ inline int Immed8Value() const { return Bits(7, 0); }
+ inline int Immed4Value() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtValue() const {
+ return Immed4Value() << 12 | Offset12Value(); }
// Fields used in Load/Store instructions
- inline int PUField() const { return Bits(24, 23); }
- inline int BField() const { return Bit(22); }
- inline int WField() const { return Bit(21); }
- inline int LField() const { return Bit(20); }
+ inline int PUValue() const { return Bits(24, 23); }
+ inline int PUField() const { return BitField(24, 23); }
+ inline int BValue() const { return Bit(22); }
+ inline int WValue() const { return Bit(21); }
+ inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
- inline int Offset12Field() const { return Bits(11, 0); }
+ inline int Offset12Value() const { return Bits(11, 0); }
// multiple
- inline int RlistField() const { return Bits(15, 0); }
+ inline int RlistValue() const { return Bits(15, 0); }
// extra loads and stores
- inline int SignField() const { return Bit(6); }
- inline int HField() const { return Bit(5); }
- inline int ImmedHField() const { return Bits(11, 8); }
- inline int ImmedLField() const { return Bits(3, 0); }
+ inline int SignValue() const { return Bit(6); }
+ inline int HValue() const { return Bit(5); }
+ inline int ImmedHValue() const { return Bits(11, 8); }
+ inline int ImmedLValue() const { return Bits(3, 0); }
// Fields used in Branch instructions
- inline int LinkField() const { return Bit(24); }
- inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+ inline int LinkValue() const { return Bit(24); }
+ inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
// Fields used in Software interrupt instructions
- inline SoftwareInterruptCodes SvcField() const {
+ inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
}
@@ -352,39 +677,47 @@
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
+ // Test for a stop instruction.
+ inline bool IsStop() const {
+ return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
+ }
+
// Special accessors that test for existence of a value.
- inline bool HasS() const { return SField() == 1; }
- inline bool HasB() const { return BField() == 1; }
- inline bool HasW() const { return WField() == 1; }
- inline bool HasL() const { return LField() == 1; }
- inline bool HasU() const { return UField() == 1; }
- inline bool HasSign() const { return SignField() == 1; }
- inline bool HasH() const { return HField() == 1; }
- inline bool HasLink() const { return LinkField() == 1; }
+ inline bool HasS() const { return SValue() == 1; }
+ inline bool HasB() const { return BValue() == 1; }
+ inline bool HasW() const { return WValue() == 1; }
+ inline bool HasL() const { return LValue() == 1; }
+ inline bool HasU() const { return UValue() == 1; }
+ inline bool HasSign() const { return SignValue() == 1; }
+ inline bool HasH() const { return HValue() == 1; }
+ inline bool HasLink() const { return LinkValue() == 1; }
// Decoding the double immediate in the vmov instruction.
double DoubleImmedVmov() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instr.
- // Use the At(pc) function to create references to Instr.
- static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
private:
// Join split register codes, depending on single or double precision.
// four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit
// specifier.
- inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
}
return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
}
- // We need to prevent the creation of instances of class Instr.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
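VFP register specifiers are split into a four-bit field plus one extra bit, and VFPGlueRegValue above recombines them asymmetrically: for single-precision registers the extra bit becomes the least-significant bit of the register code, for doubles the most-significant. A compact check mirroring those shifts (illustrative helper, not V8 API):

    #include <cassert>

    // single: (four_bits << 1) | one_bit;  double: (one_bit << 4) | four_bits.
    int GlueRegCode(bool single_precision, int four_bits, int one_bit) {
      return single_precision ? (four_bits << 1) | one_bit
                              : (one_bit << 4) | four_bits;
    }

    int main() {
      assert(GlueRegCode(true, 3, 1) == 7);    // s7:  field 0011, extra bit 1
      assert(GlueRegCode(false, 3, 1) == 19);  // d19: extra bit 1, field 0011
      return 0;
    }
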
@@ -423,6 +756,6 @@
};
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index b359dce..507954d 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -56,7 +56,7 @@
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
- assembler::arm::Simulator::FlushICache(start, size);
+ Simulator::FlushICache(start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 8a53d1c..caec55a 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,6 +37,14 @@
int Deoptimizer::table_entry_size_ = 16;
+
+int Deoptimizer::patch_size() {
+ const int kCallInstructionSizeInWords = 3;
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+
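patch_size() is small arithmetic with a consequence used below: a 3-word call sequence at 4 bytes per ARM instruction reserves 12 bytes per patched return site, and DeoptimizeFunction advances last_pc_offset by exactly that amount. Spelled out (constants restated, values per the code above):

    #include <cassert>

    int main() {
      const int kCallInstructionSizeInWords = 3;  // as in patch_size()
      const int kInstrSize = 4;                   // Assembler::kInstrSize on ARM
      const int patch_size = kCallInstructionSizeInWords * kInstrSize;
      assert(patch_size == 12);
      assert(patch_size % kInstrSize == 0);  // the ASSERT in DeoptimizeFunction
      return 0;
    }
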
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
AssertNoAllocation no_allocation;
@@ -51,6 +59,8 @@
// For each return after a safepoint insert an absolute call to the
// corresponding deoptimization entry.
+ ASSERT(patch_size() % Assembler::kInstrSize == 0);
+ int call_size_in_words = patch_size() / Assembler::kInstrSize;
unsigned last_pc_offset = 0;
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
@@ -73,14 +83,13 @@
#endif
last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- const int kCallInstructionSizeInWords = 3;
- CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size,
- kCallInstructionSizeInWords);
+ last_pc_offset += gap_code_size;
+ CodePatcher patcher(code->instruction_start() + last_pc_offset,
+ call_size_in_words);
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
deoptimization_index, Deoptimizer::LAZY);
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
- last_pc_offset +=
- gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize;
+ last_pc_offset += patch_size();
}
}
@@ -88,7 +97,7 @@
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
- (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+ (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
@@ -112,19 +121,159 @@
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
- Code* replacement_code) {
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
+ value = it.Next();
+ if (value == 1) return i;
+ }
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
+ DeoptimizationInputData* data = DeoptimizationInputData::cast(
+ optimized_code_->deoptimization_data());
+ unsigned ast_id = data->OsrAstId()->value();
+
+ int bailout_id = LookupBailoutId(data, ast_id);
+ unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+ ByteArray* translations = data->TranslationByteArray();
+
+ TranslationIterator iterator(translations, translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ ASSERT(Translation::BEGIN == opcode);
+ USE(opcode);
+ int count = iterator.Next();
+ ASSERT(count == 1);
+ USE(count);
+
+ opcode = static_cast<Translation::Opcode>(iterator.Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ unsigned node_id = iterator.Next();
+ USE(node_id);
+ ASSERT(node_id == ast_id);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+ USE(function);
+ ASSERT(function == function_);
+ unsigned height = iterator.Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ USE(height_in_bytes);
+
+ unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned input_frame_size = input_->GetFrameSize();
+ ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+ unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+ unsigned outgoing_size = outgoing_height * kPointerSize;
+ unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+ ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
+ PrintF(" => node=%u, frame=%d->%d]\n",
+ ast_id,
+ input_frame_size,
+ output_frame_size);
+ }
+
+ // There's only one output frame in the OSR case.
+ output_count_ = 1;
+ output_ = new FrameDescription*[1];
+ output_[0] = new(output_frame_size) FrameDescription(
+ output_frame_size, function_);
+
+ // Clear the incoming parameters in the optimized frame to avoid
+ // confusing the garbage collector.
+ unsigned output_offset = output_frame_size - kPointerSize;
+ int parameter_count = function_->shared()->formal_parameter_count() + 1;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_[0]->SetFrameSlot(output_offset, 0);
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the incoming parameters. This may overwrite some of the
+ // incoming argument slots we've just cleared.
+ int input_offset = input_frame_size - kPointerSize;
+ bool ok = true;
+ int limit = input_offset - (parameter_count * kPointerSize);
+ while (ok && input_offset > limit) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Set them up explicitly.
+ for (int i = 0; ok && i < 4; i++) {
+ uint32_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
+ output_offset,
+ input_value,
+ input_offset);
+ }
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+ } else {
+    // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+ output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<uint32_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+ }
}
@@ -306,7 +455,6 @@
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- // TOS: bailout-id; TOS+1: return address if not EAGER.
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -341,6 +489,10 @@
__ mov(r3, Operand(0));
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ } else if (type() == OSR) {
+ __ mov(r3, lr);
+ // Correct one word for bailout id.
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(r3, lr);
// Correct two words for bailout id and return address.
@@ -363,11 +515,10 @@
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
@@ -384,7 +535,7 @@
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
- if (type() == EAGER) {
+ if (type() == EAGER || type() == OSR) {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@@ -438,11 +589,6 @@
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
- UNIMPLEMENTED();
- }
-
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
@@ -456,7 +602,7 @@
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 297a2db..08f605b 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -64,10 +64,8 @@
#include "platform.h"
-namespace assembler {
-namespace arm {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
//------------------------------------------------------------------------------
@@ -78,7 +76,7 @@
class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
- v8::internal::Vector<char> out_buffer)
+ Vector<char> out_buffer)
: converter_(converter),
out_buffer_(out_buffer),
out_buffer_pos_(0) {
@@ -100,45 +98,45 @@
void PrintRegister(int reg);
void PrintSRegister(int reg);
void PrintDRegister(int reg);
- int FormatVFPRegister(Instr* instr, const char* format);
- void PrintMovwMovt(Instr* instr);
- int FormatVFPinstruction(Instr* instr, const char* format);
- void PrintCondition(Instr* instr);
- void PrintShiftRm(Instr* instr);
- void PrintShiftImm(Instr* instr);
- void PrintShiftSat(Instr* instr);
- void PrintPU(Instr* instr);
+ int FormatVFPRegister(Instruction* instr, const char* format);
+ void PrintMovwMovt(Instruction* instr);
+ int FormatVFPinstruction(Instruction* instr, const char* format);
+ void PrintCondition(Instruction* instr);
+ void PrintShiftRm(Instruction* instr);
+ void PrintShiftImm(Instruction* instr);
+ void PrintShiftSat(Instruction* instr);
+ void PrintPU(Instruction* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
// Handle formatting of instructions and their options.
- int FormatRegister(Instr* instr, const char* option);
- int FormatOption(Instr* instr, const char* option);
- void Format(Instr* instr, const char* format);
- void Unknown(Instr* instr);
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
// Each of these functions decodes one particular instruction type, a 3-bit
// field in the instruction encoding.
// Types 0 and 1 are combined as they are largely the same except for the way
// they interpret the shifter operand.
- void DecodeType01(Instr* instr);
- void DecodeType2(Instr* instr);
- void DecodeType3(Instr* instr);
- void DecodeType4(Instr* instr);
- void DecodeType5(Instr* instr);
- void DecodeType6(Instr* instr);
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
// Type 7 includes special Debugger instructions.
- int DecodeType7(Instr* instr);
+ int DecodeType7(Instruction* instr);
// For VFP support.
- void DecodeTypeVFP(Instr* instr);
- void DecodeType6CoprocessorIns(Instr* instr);
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
- void DecodeVCMP(Instr* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
const disasm::NameConverter& converter_;
- v8::internal::Vector<char> out_buffer_;
+ Vector<char> out_buffer_;
int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder);
@@ -169,15 +167,15 @@
// These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[max_condition] = {
+static const char* cond_names[kNumberOfConditions] = {
"eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
"hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
};
// Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instr* instr) {
- Print(cond_names[instr->ConditionField()]);
+void Decoder::PrintCondition(Instruction* instr) {
+ Print(cond_names[instr->ConditionValue()]);
}
@@ -188,36 +186,37 @@
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
- Print(assembler::arm::VFPRegisters::Name(reg, false));
+ Print(VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(assembler::arm::VFPRegisters::Name(reg, true));
+ Print(VFPRegisters::Name(reg, true));
}
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
-static const char* shift_names[max_shift] = {
+static const char* shift_names[kNumberOfShifts] = {
"lsl", "lsr", "asr", "ror"
};
// Print the register shift operands for the instruction. Generally used for
// data processing instructions.
-void Decoder::PrintShiftRm(Instr* instr) {
- Shift shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountField();
- int rm = instr->RmField();
+void Decoder::PrintShiftRm(Instruction* instr) {
+ ShiftOp shift = instr->ShiftField();
+ int shift_index = instr->ShiftValue();
+ int shift_amount = instr->ShiftAmountValue();
+ int rm = instr->RmValue();
PrintRegister(rm);
- if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
// Special case for using rm only.
return;
}
- if (instr->RegShiftField() == 0) {
+ if (instr->RegShiftValue() == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX");
@@ -225,14 +224,15 @@
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift], shift_amount);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift_index],
+ shift_amount);
} else {
// by register
- int rs = instr->RsField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift]);
+ int rs = instr->RsValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
}
@@ -240,43 +240,43 @@
// Print the immediate operand for the instruction. Generally used for data
// processing instructions.
-void Decoder::PrintShiftImm(Instr* instr) {
- int rotate = instr->RotateField() * 2;
- int immed8 = instr->Immed8Field();
+void Decoder::PrintShiftImm(Instruction* instr) {
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d", imm);
}
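The rotate/immed8 arithmetic above implements the standard ARM data-processing immediate encoding: an 8-bit value rotated right by twice the 4-bit rotate field. A minimal standalone sketch of the same decoding, with a guard added for the zero-rotate case (the function name is illustrative, not part of this file):

// Rotate an 8-bit immediate right by 2 * rotate_field within a 32-bit word.
static uint32_t DecodeArmImmediate(uint32_t immed8, uint32_t rotate_field) {
  uint32_t rotate = rotate_field * 2;
  if (rotate == 0) return immed8;  // Shifting by 32 below would be undefined.
  return (immed8 >> rotate) | (immed8 << (32 - rotate));
}
// Example: DecodeArmImmediate(0x3F, 2) == 0xF0000003.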
// Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instr* instr) {
+void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
}
}
// Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instr* instr) {
+void Decoder::PrintPU(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
Print("da");
break;
}
- case 1: {
+ case ia_x: {
Print("ia");
break;
}
- case 2: {
+ case db_x: {
Print("db");
break;
}
- case 3: {
+ case ib_x: {
Print("ib");
break;
}
@@ -292,22 +292,22 @@
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
switch (svc) {
- case call_rt_redirected:
- Print("call_rt_redirected");
+ case kCallRtRedirected:
+ Print("call rt redirected");
return;
- case break_point:
- Print("break_point");
+ case kBreakpoint:
+ Print("breakpoint");
return;
default:
- if (svc >= stop) {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d - 0x%x",
+ svc & kStopCodeMask,
+ svc & kStopCodeMask);
} else {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ svc);
}
return;
}
@@ -316,32 +316,32 @@
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatRegister(Instr* instr, const char* format) {
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register
- int reg = instr->RnField();
+ int reg = instr->RnValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: Rd register
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'm') { // 'rm: Rm register
- int reg = instr->RmField();
+ int reg = instr->RmValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: Rt register
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
- int rlist = instr->RlistField();
+ int rlist = instr->RlistValue();
int reg = 0;
Print("{");
// Print register list in ascending order, by scanning the bit mask.
@@ -365,22 +365,22 @@
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
if (format[1] == 'n') {
- int reg = instr->VnField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+ int reg = instr->VnValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'm') {
- int reg = instr->VmField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+ int reg = instr->VmValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'd') {
- int reg = instr->VdField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+ int reg = instr->VdValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
}
@@ -390,19 +390,19 @@
}
-int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
Print(format);
return 0;
}
// Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instr* instr) {
- int imm = instr->ImmedMovwMovtField();
- int rd = instr->RdField();
+void Decoder::PrintMovwMovt(Instruction* instr) {
+ int imm = instr->ImmedMovwMovtValue();
+ int rd = instr->RdValue();
PrintRegister(rd);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
}
@@ -411,7 +411,7 @@
// character of the option string (the option escape has already been
// consumed by the caller.) FormatOption returns the number of
// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instr* instr, const char* format) {
+int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
case 'a': { // 'a: accumulate multiplies
if (instr->Bit(21) == 0) {
@@ -434,8 +434,8 @@
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%g", d);
return 1;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
@@ -448,8 +448,8 @@
ASSERT(width > 0);
}
ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
@@ -469,9 +469,9 @@
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ instr->Bits(width + lsb - 1, lsb));
return 8;
}
case 'l': { // 'l: branch and link
@@ -505,31 +505,31 @@
ASSERT(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Field());
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ (instr->Bits(19, 8) << 4) +
+ instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
- int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
+ int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", offs8);
return 4;
}
case 'p': { // 'pu: P and U bits for load and store instructions
@@ -544,10 +544,10 @@
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
- if (instr->TypeField() == 0) {
+ if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
- ASSERT(instr->TypeField() == 1);
+ ASSERT(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
@@ -562,7 +562,7 @@
}
} else if (format[1] == 'v') { // 'svc
ASSERT(STRING_STARTS_WITH(format, "svc"));
- PrintSoftwareInterrupt(instr->SvcField());
+ PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
ASSERT(STRING_STARTS_WITH(format, "sign"));
@@ -579,12 +579,12 @@
}
case 't': { // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Field() << 2) + 8;
- out_buffer_pos_ += v8i::OS::SNPrintF(
- out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ int off = (instr->SImmed24Value() << 2) + 8;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(
+ reinterpret_cast<byte*>(instr) + off));
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
@@ -633,7 +633,7 @@
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
-void Decoder::Format(Instr* instr, const char* format) {
+void Decoder::Format(Instruction* instr, const char* format) {
char cur = *format++;
while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
if (cur == '\'') { // Single quote is used as the formatting escape.
@@ -649,13 +649,13 @@
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" for the instruction bits.
-void Decoder::Unknown(Instr* instr) {
+void Decoder::Unknown(Instruction* instr) {
Format(instr, "unknown");
}
-void Decoder::DecodeType01(Instr* instr) {
- int type = instr->TypeField();
+void Decoder::DecodeType01(Instruction* instr) {
+ int type = instr->TypeValue();
if ((type == 0) && instr->IsSpecialType0()) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
@@ -689,7 +689,7 @@
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
} else {
@@ -697,7 +697,7 @@
}
break;
}
- case 1: {
+ case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
} else {
@@ -705,7 +705,7 @@
}
break;
}
- case 2: {
+ case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
} else {
@@ -713,7 +713,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
} else {
@@ -730,7 +730,7 @@
} else {
// extra load/store instructions
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
} else {
@@ -738,7 +738,7 @@
}
break;
}
- case 1: {
+ case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
} else {
@@ -746,7 +746,7 @@
}
break;
}
- case 2: {
+ case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
} else {
@@ -754,7 +754,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
} else {
@@ -772,7 +772,7 @@
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
- switch (instr->Bits(7, 4)) {
+ switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
@@ -787,7 +787,7 @@
break;
}
} else if (instr->Bits(22, 21) == 3) {
- switch (instr->Bits(7, 4)) {
+ switch (instr->BitField(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
@@ -894,27 +894,27 @@
}
-void Decoder::DecodeType2(Instr* instr) {
+void Decoder::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
break;
}
- case 2: {
+ case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
break;
}
- case 3: {
+ case ib_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
break;
}
@@ -927,14 +927,14 @@
}
-void Decoder::DecodeType3(Instr* instr) {
+void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
@@ -947,11 +947,11 @@
}
break;
}
- case 2: {
+ case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
break;
}
- case 3: {
+ case ib_x: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -969,7 +969,7 @@
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
- if (instr->RmField() == 15) {
+ if (instr->RmValue() == 15) {
Format(instr, "bfc'cond 'rd, 'f");
} else {
Format(instr, "bfi'cond 'rd, 'rm, 'f");
@@ -991,7 +991,7 @@
}
-void Decoder::DecodeType4(Instr* instr) {
+void Decoder::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
if (instr->HasL()) {
Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -1001,41 +1001,43 @@
}
-void Decoder::DecodeType5(Instr* instr) {
+void Decoder::DecodeType5(Instruction* instr) {
Format(instr, "b'l'cond 'target");
}
-void Decoder::DecodeType6(Instr* instr) {
+void Decoder::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-int Decoder::DecodeType7(Instr* instr) {
+int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
- if (instr->SvcField() >= stop) {
+ if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
- out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr + Instr::kInstrSize),
- *reinterpret_cast<char**>(instr + Instr::kInstrSize),
- *reinterpret_cast<char**>(instr + Instr::kInstrSize));
- // We have decoded 2 * Instr::kInstrSize bytes.
- return 2 * Instr::kInstrSize;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n %p %08x stop message: %s",
+ reinterpret_cast<int32_t*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
+ // We have decoded 2 * Instruction::kInstrSize bytes.
+ return 2 * Instruction::kInstrSize;
} else {
Format(instr, "svc'cond 'svc");
}
} else {
DecodeTypeVFP(instr);
}
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
-// void Decoder::DecodeTypeVFP(Instr* instr)
+// void Decoder::DecodeTypeVFP(Instruction* instr)
// vmov: Sn = Rt
// vmov: Rt = Sn
// vcvt: Dd = Sm
@@ -1048,34 +1050,37 @@
// vmrs
// vmsr
// Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instr* instr) {
- ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Decoder::DecodeTypeVFP(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
- if (instr->Opc1Field() == 0x7) {
+ if (instr->Opc1Value() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
- if (instr->SzField() == 0x1) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
Format(instr, "vmov.f32'cond 'Sd, 'Sm");
}
- } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
+ // vabs
+ Format(instr, "vabs'cond 'Dd, 'Dm");
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() >> 1) == 0x6) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
- } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc3Field() == 0x0) {
- if (instr->SzField() == 0x1) {
+ } else if (instr->Opc3Value() == 0x0) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
@@ -1083,9 +1088,9 @@
} else {
Unknown(instr); // Not used by V8.
}
- } else if (instr->Opc1Field() == 0x3) {
- if (instr->SzField() == 0x1) {
- if (instr->Opc3Field() & 0x1) {
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() == 0x1) {
+ if (instr->Opc3Value() & 0x1) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1093,14 +1098,14 @@
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
- if (instr->SzField() == 0x1) {
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
- if (instr->SzField() == 0x1) {
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
@@ -1109,13 +1114,13 @@
Unknown(instr); // Not used by V8.
}
} else {
- if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0)) {
+ if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
- if (instr->VLField() == 0) {
+ if (instr->VLValue() == 0) {
if (instr->Bits(15, 12) == 0xF) {
Format(instr, "vmsr'cond FPSCR, APSR");
} else {
@@ -1133,11 +1138,12 @@
}
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0));
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ Instruction* instr) {
+ ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
- bool to_arm_register = (instr->VLField() == 0x1);
+ bool to_arm_register = (instr->VLValue() == 0x1);
if (to_arm_register) {
Format(instr, "vmov'cond 'rt, 'Sn");
@@ -1147,19 +1153,19 @@
}
-void Decoder::DecodeVCMP(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1));
+void Decoder::DecodeVCMP(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
// Comparison.
- bool dp_operation = (instr->SzField() == 1);
+ bool dp_operation = (instr->SzValue() == 1);
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
- if (instr->Opc2Field() == 0x4) {
+ if (instr->Opc2Value() == 0x4) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc2Field() == 0x5) {
+ } else if (instr->Opc2Value() == 0x5) {
Format(instr, "vcmp.f64'cond 'Dd, #0.0");
} else {
Unknown(instr); // invalid
@@ -1170,11 +1176,11 @@
}
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
- bool double_to_single = (instr->SzField() == 1);
+ bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
@@ -1184,13 +1190,13 @@
}
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
- (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzField() == 1);
+ bool dp_operation = (instr->SzValue() == 1);
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
@@ -1232,11 +1238,11 @@
// <Rt, Rt2> = vmov(Dm)
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
- ASSERT((instr->TypeField() == 6));
+void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
+ ASSERT(instr->TypeValue() == 6);
- if (instr->CoprocessorField() == 0xA) {
- switch (instr->OpcodeField()) {
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
case 0x8:
case 0xA:
if (instr->HasL()) {
@@ -1257,8 +1263,8 @@
Unknown(instr); // Not used by V8.
break;
}
- } else if (instr->CoprocessorField() == 0xB) {
- switch (instr->OpcodeField()) {
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
@@ -1295,16 +1301,16 @@
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
- Instr* instr = Instr::At(instr_ptr);
+ Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- if (instr->ConditionField() == special_condition) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
- switch (instr->TypeField()) {
+ switch (instr->TypeValue()) {
case 0:
case 1: {
DecodeType01(instr);
@@ -1339,11 +1345,11 @@
break;
}
}
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
@@ -1351,8 +1357,6 @@
namespace disasm {
-namespace v8i = v8::internal;
-
const char* NameConverter::NameOfAddress(byte* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
@@ -1367,7 +1371,7 @@
const char* NameConverter::NameOfCPURegister(int reg) const {
- return assembler::arm::Registers::Name(reg);
+ return v8::internal::Registers::Name(reg);
}
@@ -1401,7 +1405,7 @@
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
- assembler::arm::Decoder d(converter_, buffer);
+ v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
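The wrapper above forwards to the internal Decoder one instruction at a time. A hedged usage sketch based only on the signatures visible in this file (the buffer size and the printf call are illustrative assumptions):

// Decode and print a single ARM instruction starting at pc.
void DisassembleOne(byte* pc) {
  disasm::NameConverter converter;
  disasm::Disassembler d(converter);
  v8::internal::EmbeddedVector<char, 128> buffer;
  int length = d.InstructionDecode(buffer, pc);  // Returns bytes consumed.
  printf("%s  (%d bytes)\n", buffer.start(), length);  // Needs <stdio.h>.
}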
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index d2726cf..a805d28 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,20 +30,13 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "frames-inl.h"
-#include "arm/assembler-arm-inl.h"
-
namespace v8 {
namespace internal {
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
- Address sp = fp + ExitFrameConstants::kSPOffset;
- if (marker == NULL) {
- sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
- }
- return sp;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 00c20ef..4aa8d6a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -66,8 +66,7 @@
1 << 6 | // r6 v3
1 << 7 | // r7 v4
1 << 8 | // r8 v5 (cp in JavaScript code)
- kR9Available
- << 9 | // r9 v6
+ kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
@@ -108,21 +107,17 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
- // TODO(regis): Use a patched sp value on the stack instead.
- // A marker of 0 indicates that double registers are saved.
- static const int kMarkerOffset = -2 * kPointerSize;
-
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
- // The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +2 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
};
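Read together, the constants above give the exit frame layout. The following diagram is a sketch of one consistent reading (offsets in words relative to fp; the slot labels are descriptive, not names from this file):

// fp + 2 : address the caller's sp points to (kCallerSPDisplacement)
// fp + 1 : saved caller pc (kCallerPCOffset)
// fp + 0 : saved caller fp (kCallerFPOffset)
// fp - 1 : saved sp (kSPOffset)
// fp - 2 : code object (kCodeOffset)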
@@ -132,8 +127,8 @@
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerSPOffset = 2 * kPointerSize;
};
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 338e39c..2685fcb 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -45,6 +45,67 @@
#define __ ACCESS_MASM(masm_)
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and the method EmitPatchInfo
+// to record a marker back to the patchable code. This marker is a
+// cmp rx, #yyy instruction, where x * 0x00000fff + yyy (using the raw 12-bit
+// immediate value) is the delta from the pc to the first instruction of the
+// patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ // When initially emitting this, ensure that a jump is always generated to
+ // skip the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+ // Don't use b(al, ...) as that might emit the constant pool right after the
+ // branch. After patching, when the branch is no longer unconditional,
+ // execution can continue into the constant pool.
+ __ b(eq, target); // Always taken before patched.
+ }
+
+ // When initially emitting this, ensure that a jump is never generated to
+ // skip the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+ __ b(ne, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
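The marker emitted by EmitPatchInfo packs the pc delta into the cmp instruction: the register code carries delta / kOff12Mask and the raw 12-bit immediate carries delta % kOff12Mask. A minimal sketch of the corresponding decode, assuming kOff12Mask is the usual ARM 0x00000fff (the function name is illustrative):

// Recover the pc delta recorded by EmitPatchInfo from a patch-info marker.
static int DecodePatchDelta(int reg_code, int raw_immediate12) {
  const int kOff12Mask = 0x00000fff;
  return reg_code * kOff12Mask + raw_immediate12;
}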
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -268,15 +329,10 @@
}
#ifdef DEBUG
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. If the sp_delts above cannot be encoded in the
- // add instruction the add will generate two instructions.
- int return_sequence_length =
- masm_->InstructionsGeneratedSince(&check_exit_codesize);
- CHECK(return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions ||
- return_sequence_length ==
- Assembler::kJSReturnSequenceInstructions + 1);
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -517,16 +573,16 @@
}
-void FullCodeGenerator::Split(Condition cc,
+void FullCodeGenerator::Split(Condition cond,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (if_false == fall_through) {
- __ b(cc, if_true);
+ __ b(cond, if_true);
} else if (if_true == fall_through) {
- __ b(NegateCondition(cc), if_false);
+ __ b(NegateCondition(cond), if_false);
} else {
- __ b(cc, if_true);
+ __ b(cond, if_true);
__ b(if_false);
}
}
@@ -681,18 +737,24 @@
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- VisitForStackValue(prop->obj());
- if (function != NULL) {
- VisitForStackValue(prop->key());
- VisitForAccumulatorValue(function);
- __ pop(r1); // Key.
- } else {
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, result_register()); // Key.
- __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ // property. Use (keyed) IC to set the initial value. We
+ // cannot visit the rewrite because it's shared and we risk
+ // recording duplicate AST IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- __ pop(r2); // Receiver.
+ if (function != NULL) {
+ __ push(r0);
+ VisitForAccumulatorValue(function);
+ __ pop(r2);
+ } else {
+ __ mov(r2, r0);
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ }
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -734,6 +796,8 @@
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
+ clause->body_target()->entry_label()->Unuse();
+
// The default is not a test, but remember it as the final fall-through.
if (clause->is_default()) {
default_clause = clause;
@@ -750,24 +814,24 @@
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
+
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
- __ bind(&slow_case);
+ __ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(eq, true, flags, r1, r0);
- __ CallStub(&stub);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+ __ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
@@ -817,7 +881,7 @@
// Convert the object to a JS object.
Label convert, done_convert;
- __ BranchOnSmi(r0, &convert);
+ __ JumpIfSmi(r0, &convert);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(hs, &done_convert);
__ bind(&convert);
@@ -1548,8 +1612,8 @@
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- GenericBinaryOpStub stub(op, mode, r1, r0);
- __ CallStub(&stub);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL);
context()->Plug(r0);
}
@@ -1621,8 +1685,10 @@
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1912,7 +1978,10 @@
__ ldr(r1,
MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
__ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
@@ -1988,16 +2057,21 @@
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed CallIC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
if (prop->is_synthetic()) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForAccumulatorValue(prop->key());
- }
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, r1);
+ __ ldr(r1, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
+
// Record source code position for IC call.
SetSourcePosition(prop->position());
- __ pop(r1); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -2006,6 +2080,9 @@
__ Push(r0, r1); // Function, receiver.
EmitCallWithStub(expr);
} else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
}
@@ -2122,7 +2199,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r0, ip);
__ b(eq, if_true);
@@ -2154,7 +2231,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2175,7 +2252,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -2221,7 +2298,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2242,7 +2319,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2263,7 +2340,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2370,7 +2447,7 @@
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ BranchOnSmi(r0, &null);
+ __ JumpIfSmi(r0, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
@@ -2521,7 +2598,7 @@
Label done;
// If the object is a smi return the object.
- __ BranchOnSmi(r0, &done);
+ __ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
__ b(ne, &done);
@@ -2551,7 +2628,7 @@
Label done;
// If the object is a smi, return the value.
- __ BranchOnSmi(r1, &done);
+ __ JumpIfSmi(r1, &done);
// If the object is not a value type, return the value.
__ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
@@ -2978,29 +3055,31 @@
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
- } else {
- // Property or variable reference. Call the delete builtin with
- // object and property name as arguments.
- if (prop != NULL) {
+ } else if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- } else if (var->is_global()) {
- __ ldr(r1, GlobalObjectOperand());
- __ mov(r0, Operand(var->name()));
- __ Push(r1, r0);
- } else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(r0);
- __ mov(r2, Operand(var->name()));
- __ push(r2);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ context()->Plug(r0);
}
+ } else if (var->is_global()) {
+ __ ldr(r1, GlobalObjectOperand());
+ __ mov(r0, Operand(var->name()));
+ __ Push(r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(r0);
}
break;
}
@@ -3044,8 +3123,8 @@
Label no_conversion;
__ tst(result_register(), Operand(kSmiTagMask));
__ b(eq, &no_conversion);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
context()->Plug(result_register());
break;
@@ -3076,7 +3155,7 @@
bool inline_smi_code = ShouldInlineSmiCase(expr->op());
if (inline_smi_code) {
Label call_stub;
- __ BranchOnNotSmi(r0, &call_stub);
+ __ JumpIfNotSmi(r0, &call_stub);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
@@ -3163,9 +3242,9 @@
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ BranchOnSmi(r0, &no_conversion);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ JumpIfSmi(r0, &no_conversion);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
// Save result for postfix expressions.
@@ -3197,7 +3276,7 @@
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- __ BranchOnSmi(r0, &done);
+ __ JumpIfSmi(r0, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3450,34 +3529,34 @@
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = eq;
+ Condition cond = eq;
bool strict = false;
switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
case Token::EQ:
- cc = eq;
+ cond = eq;
__ pop(r1);
break;
case Token::LT:
- cc = lt;
+ cond = lt;
__ pop(r1);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = lt;
+ cond = lt;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = ge;
+ cond = ge;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
- cc = ge;
+ cond = ge;
__ pop(r1);
break;
case Token::IN:
@@ -3487,22 +3566,23 @@
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
- __ BranchOnNotSmi(r2, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
- Split(cc, if_true, if_false, NULL);
+ Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cc, strict, flags, r1, r0);
- __ CallStub(&stub);
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- Split(cc, if_true, if_false, fall_through);
+ __ cmp(r0, Operand(0));
+ Split(cond, if_true, if_false, fall_through);
}
}
@@ -3568,6 +3648,16 @@
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 6120bba..8c76458 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -95,13 +95,13 @@
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
- __ b(nz, miss);
+ __ b(ne, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
- __ b(nz, miss);
+ __ b(ne, miss);
}
@@ -379,7 +379,7 @@
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -388,7 +388,8 @@
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
+ support_wrappers);
// Cache miss: Jump to runtime.
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -419,14 +420,14 @@
int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
- __ BranchOnSmi(receiver, slow);
+ __ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(nz, slow);
+ __ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@@ -749,7 +750,7 @@
Label index_smi, index_string;
// Check that the key is a smi.
- __ BranchOnNotSmi(r2, &check_string);
+ __ JumpIfNotSmi(r2, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1165,7 +1166,7 @@
Register receiver = r1;
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &check_string);
+ __ JumpIfNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1188,19 +1189,18 @@
// r0: key
// r1: receiver
__ bind(&check_pixel_array);
- __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &check_number_dictionary);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ cmp(r2, ip);
- __ b(hs, &slow);
- __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
- __ ldrb(r2, MemOperand(ip, r2));
- __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi.
- __ Ret();
+
+ GenerateFastPixelArrayLoad(masm,
+ r1,
+ r0,
+ r3,
+ r4,
+ r2,
+ r5,
+ r0,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
@@ -1337,311 +1337,6 @@
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // Check that the object isn't a smi
- __ BranchOnSmi(receiver, &slow);
-
- // Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
-
- // Check that the object is a JS object. Load map into r2.
- __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r2, ip);
- __ b(ne, &slow);
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(ip, Operand(key, ASR, kSmiTagSize));
- // Unsigned comparison catches both negative and too-large values.
- __ b(lo, &slow);
-
- // r3: elements array
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalUnsignedByteArray:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalShortArray:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalUnsignedShortArray:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case kExternalFloatArray:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For floating-point array type
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
-
- if (array_type == kExternalIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- WriteInt32ToHeapNumberStub stub(value, r0, r3);
- __ TailCallStub(&stub);
- }
- } else if (array_type == kExternalUnsignedIntArray) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-
- __ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
-      // VFP is not available; do manual single-to-double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00, RelocInfo::NONE));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-}
-
-
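The non-VFP branch of the load stub deleted above widens a binary32 word to a binary64 by hand: exponent 0 (zero and denormals) passes through unchanged, the all-ones exponent 0xFF (NaN and Infinity) maps to 0x7FF, and every other exponent is rebiased from 127 to 1023 before the 23 mantissa bits are split across the HeapNumber's two 32-bit words. A minimal stand-alone sketch of the same bit manipulation, in plain C++ rather than generated ARM code (the helper name is hypothetical):

```cpp
#include <stdint.h>

// Widen binary32 bits to binary64 bits the way the removed stub does.
static uint64_t Binary32ToBinary64Bits(uint32_t f) {
  uint64_t sign = static_cast<uint64_t>(f & 0x80000000u) << 32;
  uint64_t exponent = (f >> 23) & 0xFF;
  uint64_t mantissa = static_cast<uint64_t>(f & 0x007FFFFFu) << (52 - 23);
  if (exponent == 0xFF) {
    exponent = 0x7FF;          // NaN/Infinity keep an all-ones exponent.
  } else if (exponent != 0) {
    exponent += 1023 - 127;    // Rebias normal numbers.
  }                            // Exponent 0 (zero/denormal) is left as-is.
  return sign | (exponent << 52) | mantissa;
}
```

The stub performs the equivalent mantissa split with kMantissaShiftForHiWord and kMantissaShiftForLoWord because the double is written as two separate 32-bit words.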
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
@@ -1651,7 +1346,7 @@
Label slow;
// Check that the receiver isn't a smi.
- __ BranchOnSmi(r1, &slow);
+ __ JumpIfSmi(r1, &slow);
// Check that the key is an array index, that is Uint32.
__ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
@@ -1775,7 +1470,7 @@
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
- __ BranchOnNotSmi(value, &slow);
+ __ JumpIfNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
@@ -1838,385 +1533,8 @@
}
-// Convert the int passed in register ival to an IEEE 754 single-precision
-// floating-point value and store it at memory location (dst + 4 * wordoffset).
-// If VFP3 is available, use it for the conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
-    // Move the sign bit from source to destination. This works because the
-    // sign bit of the binary32 word has the same position and polarity as the
-    // 2's complement sign bit of the integer value.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
-    __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
-    __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
-    // This wouldn't work for 1 and -1 as the shift would be 32, which yields 0.
- __ mov(ival, Operand(ival, LSL, zeros));
-    // Or in the top kBinary32MantissaBits mantissa bits.
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
-
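The fallback branch of StoreIntAsFloat above assembles the binary32 word manually: split off the sign, take the magnitude, special-case 0 and +/-1, then normalize with a leading-zero count (CountLeadingZeros) and truncate the mantissa. A stand-alone sketch of the same construction, assuming the GCC/Clang __builtin_clz intrinsic in place of the ARM instruction:

```cpp
#include <stdint.h>

// Build binary32 bits from a 32-bit int, truncating like the stub does.
static uint32_t IntToBinary32Bits(int32_t ival) {
  uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
  uint32_t mag = sign ? 0u - static_cast<uint32_t>(ival)
                      : static_cast<uint32_t>(ival);
  if (mag == 0) return sign;                 // +/-0: all other bits zero.
  if (mag == 1) return sign | (127u << 23);  // +/-1: biased zero exponent.
  int zeros = __builtin_clz(mag);            // mag > 1, so zeros <= 30.
  uint32_t exponent = (31 - zeros) + 127;    // kBinary32ExponentBias == 127.
  mag = (mag << zeros) << 1;                 // Chop off the implicit one bit.
  return sign | (exponent << 23) | (mag >> (32 - 23));
}
```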
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
- return true;
-
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
- return false;
-
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
-
- // Check that the object is a JS object. Load map into r3.
- __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
- __ b(le, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
-
- // Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
-
- // Check that the elements array is the appropriate type of ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r4, ip);
- __ b(ne, &slow);
-
- // Check that the index is in range.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(r4, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- // r4: key (integer).
- __ BranchOnNotSmi(value, &check_heap_number);
- __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
- // r5: value (integer).
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- case kExternalFloatArray:
- // Perform int-to-float conversion and store to memory.
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
-  // Entry registers are intact; r0 holds the value, which is the return value.
- __ Ret();
-
-
- // r3: external array.
- // r4: index (integer).
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- if (array_type == kExternalFloatArray) {
-      // vldr requires the offset to be a multiple of 4, so we cannot
-      // fold -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(r4, LSL, 2));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
-
-      // Hoisted load. vldr requires the offset to be a multiple of 4, so we
-      // cannot fold -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
-
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-      // NaNs and Infinities have all-ones exponents, so they sign-extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
-
-      // Not Infinity or NaN: simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
- } else {
- __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-
-    // Entry registers are intact; r0 holds the value, which is the return value.
- __ Ret();
- } else {
-    // VFP3 is not available; do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (array_type == kExternalFloatArray) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
-      // Entry registers are intact; r0 holds the value, which is the return
-      // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else {
- bool is_signed_type = IsElementTypeSigned(array_type);
-      int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
-      // If the exponent is negative, the result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
-      // If the exponent is too big, the result is the minimal value.
-      __ cmp(r9, Operand(meaningful_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
-      __ rsb(r9, r9, Operand(meaningful_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // Entry registers are intact.
- // r0: value
- // r1: key
- // r2: receiver
- GenerateRuntimeSetProperty(masm);
-}
-
-
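The store stub removed above screens HeapNumber values by exponent before converting: an all-ones exponent marks NaN or Infinity, which, per the WebGL note in the code, are stored as zero; finite values are converted with round-to-zero, and the VFP vcvt instructions saturate anything out of range. A hedged C++ sketch of that observable behavior for the signed-int case (the helper name is hypothetical, not V8 API):

```cpp
#include <stdint.h>
#include <string.h>

// NaN/Infinity -> 0; otherwise truncate toward zero with saturation,
// mirroring the VFP path (vcvt_s32_f64 with RoundToZero).
static int32_t DoubleToInt32Clamped(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  if (((bits >> 52) & 0x7FF) == 0x7FF) return 0;  // NaN or +/-Infinity.
  if (d >= 2147483648.0) return INT32_MAX;        // Saturate high.
  if (d < -2147483648.0) return INT32_MIN;        // Saturate low.
  return static_cast<int32_t>(d);                 // Truncate toward zero.
}
```

The non-VFP fallback differs slightly at the edges: any exponent that is too large stores min_value (0x80000000 for signed types, 0 for unsigned) regardless of sign.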
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2227,7 +1545,8 @@
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
- MONOMORPHIC);
+ MONOMORPHIC,
+ extra_ic_state);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
@@ -2272,7 +1591,7 @@
Register scratch = r3;
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -2286,7 +1605,7 @@
__ b(ne, &miss);
// Check that value is a smi.
- __ BranchOnNotSmi(value, &miss);
+ __ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver, value);
@@ -2356,7 +1675,7 @@
return ge;
default:
UNREACHABLE();
- return no_condition;
+ return kNoCondition;
}
}
@@ -2383,11 +1702,78 @@
Token::Name(op_));
}
#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
}
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+ Address cmp_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ if (!Assembler::IsCmpImmediate(instr)) {
+ return;
+ }
+
+  // The delta to the start of the map check instruction is encoded in the
+  // immediate and register fields of the cmp instruction; the condition of
+  // the branch to patch is inspected below.
+ int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+ delta +=
+ Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
+  // If the delta is 0, the instruction is cmp r0, #0, which also signals that
+ // nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+ address, cmp_instruction_address, delta);
+ }
+#endif
+
+ Address patch_address =
+ cmp_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+ ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+ Assembler::GetRm(instr_at_patch).code());
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::GetCondition(branch_instr) == eq) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b eq, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b ne, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
+ patcher.EmitCondition(ne);
+ } else {
+ ASSERT(Assembler::GetCondition(branch_instr) == ne);
+ // This is patching a "jump if smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b ne, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b eq, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
+ patcher.EmitCondition(eq);
+ }
}
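For reference, the predicate behind the patched sequence: an ARM smi keeps its tag in the low bit (kSmiTag == 0), so tst rx, #kSmiTagMask computes exactly the check below, while the 0xC0000000 comparisons in the removed load stubs earlier in this file test the companion property that an untagged 32-bit value fits the 31-bit smi payload. A minimal sketch with the constants written out:

```cpp
#include <stdint.h>

// What "tst rx, #kSmiTagMask" decides: smis have a zero low tag bit.
static inline bool IsSmi(intptr_t value) {
  return (value & 1) == 0;  // kSmiTagMask == 1, kSmiTag == 0.
}

// What "cmp value, #0xC0000000; b mi, &box_int" decides before tagging:
// the value must fit the 31-bit payload, i.e. lie in [-2^30, 2^30).
static inline bool FitsSmi(int32_t value) {
  return value >= -(1 << 30) && value < (1 << 30);
}
```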
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index c6eb628..df370c4 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -76,7 +76,7 @@
}
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
@@ -86,7 +86,7 @@
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
- cgen()->frame()->MergeTo(&entry_frame_, cc);
+ cgen()->frame()->MergeTo(&entry_frame_, cond);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
- __ b(cc, &entry_label_);
- if (cc == al) {
+ __ b(cond, &entry_label_);
+ if (cond == al) {
cgen()->DeleteFrame();
}
}
@@ -143,6 +143,16 @@
entry_frame_set_ = true;
} else {
cgen()->frame()->MergeTo(&entry_frame_);
+ // On fall through we may have to merge both ways.
+ if (direction_ != FORWARD_ONLY) {
+      // This will not need to adjust the virtual frame entries that are
+      // register-allocated, since that was done above and they now match.
+      // But it does need to adjust the entry_frame_ of this jump target to
+      // make it potentially less optimistic. Later code can branch back to
+      // this jump target, and we need to assert that such code does not
+      // have weaker assumptions about types.
+ entry_frame_.MergeTo(cgen()->frame());
+ }
}
} else {
// If there is no current frame we must have an entry frame which we can
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index b51633e..82de5d3 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
@@ -56,6 +57,29 @@
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+  // Inputs must use a fixed register or a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand) {
ASSERT(spill_operand->IsDoubleStackSlot());
@@ -64,12 +88,11 @@
}
-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- result()->PrintTo(stream);
- stream->Add(" ");
- }
+
+ PrintOutputOperandTo(stream);
+
PrintDataTo(stream);
if (HasEnvironment()) {
@@ -84,7 +107,29 @@
}
-void LLabel::PrintDataTo(StringStream* stream) const {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ inputs_.PrintOperandsTo(stream);
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ results_.PrintOperandsTo(stream);
+}
+
+
+template<typename T, int N>
+void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
+ for (int i = 0; i < N; i++) {
+ if (i > 0) stream->Add(" ");
+ elems_[i]->PrintTo(stream);
+ }
+}
+
+
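The templated printers above lean on the fact that LTemplateInstruction encodes its result, input, and temp counts as template parameters, so the operand containers are fixed-size arrays and PrintOperandsTo can loop over a compile-time N. A simplified, stand-alone illustration of that shape (two template parameters instead of three, and plain strings standing in for LOperand and StringStream):

```cpp
#include <stdio.h>

template <typename T, int N>
struct OperandContainer {
  T elems_[N];
  void PrintOperandsTo() {
    for (int i = 0; i < N; i++) {
      if (i > 0) printf(" ");
      printf("%s", elems_[i]);
    }
  }
};

template <int R, int I>
struct LTemplateInstruction {
  OperandContainer<const char*, R> results_;
  OperandContainer<const char*, I> inputs_;
};

int main() {
  LTemplateInstruction<1, 2> add;  // One result, two inputs.
  add.results_.elems_[0] = "r0";
  add.inputs_.elems_[0] = "r1";
  add.inputs_.elems_[1] = "r0";
  add.results_.PrintOperandsTo();
  printf(" = ");
  add.inputs_.PrintOperandsTo();
  printf("\n");  // Prints "r0 = r1 r0", matching PrintTo's layout.
  return 0;
}
```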
+void LLabel::PrintDataTo(StringStream* stream) {
LGap::PrintDataTo(stream);
LLabel* rep = replacement();
if (rep != NULL) {
@@ -136,6 +181,12 @@
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "shl-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -143,74 +194,65 @@
}
-
-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- left()->PrintTo(stream);
- stream->Add(" ");
- right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- left()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -218,79 +260,82 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) const {
- input()->PrintTo(stream);
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
}
-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LCallNew::PrintDataTo(StringStream* stream) const {
- LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -301,6 +346,24 @@
}
+void LStoreNamed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
LChunk::LChunk(HGraph* graph)
: spill_slot_count_(0),
graph_(graph),
@@ -310,11 +373,6 @@
}
-void LChunk::Verify() const {
- // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
int LChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot for a double-width slot.
if (is_double) spill_slot_count_++;
@@ -369,25 +427,7 @@
}
-void LStoreNamed::PrintDataTo(StringStream* stream) const {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -403,7 +443,6 @@
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -593,33 +632,52 @@
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::NONE));
}
-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr, int index) {
return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateInstruction<1, I, T>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
- DoubleRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -633,16 +691,16 @@
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
+ instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
+ instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
@@ -650,7 +708,10 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -674,16 +735,15 @@
}
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ instr->MarkAsSaveDoubles();
return instr;
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
return instr;
}
@@ -721,18 +781,38 @@
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), r1);
+ LOperand* right = UseFixed(instr->right(), r0);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+ }
+
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->OperandAt(0)->representation().IsInteger32());
ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@@ -795,10 +875,11 @@
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
- LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
@@ -865,7 +946,6 @@
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -876,17 +956,19 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch()) {
- instr->set_hydrogen_value(HBranch::cast(current)->value());
+ if (current->IsTest() && !instr->IsGoto()) {
+ ASSERT(instr->IsControl());
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
+ ASSERT(first != NULL && second != NULL);
+ instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -931,23 +1013,15 @@
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- HBasicBlock* first = instr->FirstSuccessor();
- HBasicBlock* second = instr->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- int first_id = first->block_id();
- int second_id = second->block_id();
-
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- first_id,
- second_id);
+ TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
Token::Value op = compare->token();
@@ -958,16 +1032,12 @@
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right),
- first_id,
- second_id);
+ UseRegisterAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right),
- first_id,
- second_id);
+ UseRegisterAtStart(right));
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
@@ -975,86 +1045,68 @@
LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand,
- first_id,
- second_id);
+ right_operand);
return MarkAsCall(result, instr);
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()),
- first_id,
- second_id);
+ return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- first_id,
- second_id);
+ return new LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(compare->value()));
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()), first_id, second_id);
+ UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- first_id,
- second_id);
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2,
- first_id,
- second_id);
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()),
- first_id,
- second_id);
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
- new LInstanceOfAndBranch(Use(instance_of->left()),
- Use(instance_of->right()),
- first_id,
- second_id);
+ new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
+ UseFixed(instance_of->right(), r1));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
- first_id,
- second_id);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(first_id);
+ return new LGoto(instr->FirstSuccessor()->block_id());
} else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(second_id);
+ return new LGoto(instr->SecondSuccessor()->block_id());
}
}
Abort("Undefined compare before branch");
return NULL;
}
}
- return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+ return new LBranch(UseRegisterAtStart(v));
}
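DoTest's structure is easiest to read as a two-level dispatch: when the tested value is emitted at its use site, the builder fuses it with the branch into a dedicated compare-and-branch instruction (whose targets are now filled in later by VisitInstruction from the HTest successors), and otherwise it materializes the boolean and falls back to a plain LBranch. A toy model of that selection, with strings standing in for the lithium classes:

```cpp
#include <stdio.h>

enum TestedValueKind { kIsSmiTest, kIsNullTest, kOtherValue };

// Fuse the compare into the branch when the value is emit-at-uses and of
// a recognized shape; otherwise branch on the materialized boolean.
static const char* SelectBranchInstruction(TestedValueKind kind,
                                           bool emit_at_uses) {
  if (emit_at_uses) {
    switch (kind) {
      case kIsSmiTest:  return "LIsSmiAndBranch";
      case kIsNullTest: return "LIsNullAndBranch";
      default:          break;  // Constants fold to LGoto; see DoTest.
    }
  }
  return "LBranch";
}

int main() {
  printf("%s\n", SelectBranchInstruction(kIsSmiTest, true));   // fused
  printf("%s\n", SelectBranchInstruction(kIsSmiTest, false));  // generic
  return 0;
}
```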
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -1073,7 +1125,7 @@
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstruction* result =
+ LInstanceOf* result =
new LInstanceOf(UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
@@ -1082,21 +1134,22 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
- LInstruction* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0));
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+ MarkAsSaveDoubles(result);
+ return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);
- LOperand* length = UseRegisterAtStart(instr->length());
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LInstruction* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
+ LOperand* length = UseFixed(instr->length(), r2);
+ LOperand* elements = UseFixed(instr->elements(), r3);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1108,13 +1161,26 @@
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1129,12 +1195,12 @@
BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LInstruction* result = new LUnaryMathOperation(input, temp);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
@@ -1162,8 +1228,8 @@
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
- UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr);
+ LOperand* key = UseFixed(instr->key(), r2);
+ return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
}
@@ -1188,7 +1254,7 @@
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
- LInstruction* result = new LCallNew(constructor);
+ LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1251,10 +1317,10 @@
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
- LOperand* value = UseFixed(instr->left(), r0);
+ LOperand* dividend = UseFixed(instr->left(), r0);
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
- DefineFixed(new LDivI(value, divisor), r0)));
+ DefineFixed(new LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1314,8 +1380,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1364,7 +1430,7 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
+ LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
@@ -1378,7 +1444,7 @@
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LInstruction* result = new LCmpT(left, right);
+ LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}
@@ -1388,7 +1454,7 @@
HCompareJSObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LInstruction* result = new LCmpJSObjectEq(left, right);
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
@@ -1405,7 +1471,7 @@
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return DefineAsRegister(new LIsObject(value));
}
@@ -1447,6 +1513,12 @@
}
+LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LPixelArrayLength(array));
+}
+
+
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LFixedArrayLength(array));
@@ -1455,7 +1527,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LInstruction* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1466,6 +1538,13 @@
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), r0);
return MarkAsCall(new LThrow(value), instr);
@@ -1478,7 +1557,7 @@
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1504,13 +1583,13 @@
// Make sure that the temp and result_temp registers are
// different.
LUnallocated* result_temp = TempRegister();
- LInstruction* result = new LNumberTagD(value, temp1, temp2);
+ LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LDoubleToI(value);
+ LDoubleToI* res = new LDoubleToI(value, TempRegister());
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1520,7 +1599,7 @@
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
- LInstruction* result = new LNumberTagI(value);
+ LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
@@ -1582,13 +1661,11 @@
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- int32_t value = instr->Integer32Value();
- return DefineAsRegister(new LConstantI(value));
+ return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
- double value = instr->DoubleValue();
- return DefineAsRegister(new LConstantD(value));
+ return DefineAsRegister(new LConstantD);
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT(instr->handle()));
+ return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1597,7 +1674,7 @@
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LInstruction* result = new LLoadGlobal();
+ LLoadGlobal* result = new LLoadGlobal();
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
@@ -1605,12 +1682,32 @@
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+ if (instr->check_hole_value()) {
+ LOperand* temp = TempRegister();
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(new LStoreGlobal(value, temp));
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LStoreGlobal(value, NULL);
+ }
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- return DefineAsRegister(new LLoadContextSlot);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context = UseTempRegister(instr->context());
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ value = UseTempRegister(instr->value());
+ } else {
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
}
@@ -1636,7 +1733,14 @@
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineSameAsFirst(new LLoadElements(input));
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
+ HLoadPixelArrayExternalPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
@@ -1646,11 +1750,24 @@
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LInstruction* result = new LLoadKeyedFastElement(obj, key);
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineSameAsFirst(result));
}
+LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
+ HLoadPixelArrayElement* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer =
+ UseRegisterAtStart(instr->external_pointer());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadPixelArrayElement* result =
+ new LLoadPixelArrayElement(external_pointer, key);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
@@ -1717,6 +1834,20 @@
}
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
}
@@ -1738,9 +1869,9 @@
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LInstruction* result = new LDeleteProperty(object, key);
+ LOperand* object = UseFixed(instr->object(), r0);
+ LOperand* key = UseFixed(instr->key(), r1);
+ LDeleteProperty* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1781,13 +1912,13 @@
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
- return DefineAsRegister(AssignEnvironment(result));
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LInstruction* result = new LTypeof(UseRegisterAtStart(instr->value()));
+ LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1796,6 +1927,12 @@
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
+
+LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
+ return DefineAsRegister(new LIsConstructCall());
+}
+
+
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
@@ -1819,7 +1956,7 @@
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
LInstruction* result = new LLazyBailout;
result = AssignEnvironment(result);
- instructions_pending_deoptimization_environment_->
+ instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
return result;
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index aab4081..8d2573d 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -39,120 +39,11 @@
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LBinaryOperation
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpIDAndBranch
-// LCmpJSObjectEq
-// LCmpJSObjectEqAndBranch
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfAndBranch
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LCheckPrototypeMaps
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGlobalObject
-// LGlobalReceiver
-// LLabel
-// LLazyBailout
-// LLoadContextSlot
-// LLoadGlobal
-// LMaterializedLiteral
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LUnaryOperation
-// LJSArrayLength
-// LFixedArrayLength
-// LBitNotI
-// LBranch
-// LCallNew
-// LCheckFunction
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LClassOfTestAndBranch
-// LDeleteProperty
-// LDoubleToI
-// LHasCachedArrayIndex
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceType
-// LHasInstanceTypeAndBranch
-// LInteger32ToDouble
-// LIsNull
-// LIsNullAndBranch
-// LIsObject
-// LIsObjectAndBranch
-// LIsSmi
-// LIsSmiAndBranch
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LTypeofIsAndBranch
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(BinaryOperation) \
- V(Constant) \
+ V(ControlInstruction) \
V(Call) \
- V(MaterializedLiteral) \
V(StoreKeyed) \
V(StoreNamed) \
- V(UnaryOperation) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -183,6 +74,8 @@
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -193,6 +86,7 @@
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -203,6 +97,10 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -214,22 +112,18 @@
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
+ V(LoadPixelArrayElement) \
+ V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -237,7 +131,9 @@
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
+ V(PixelArrayLength) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -245,17 +141,22 @@
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
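These V(...) lists are X-macros: each client supplies its own definition of V and expands the list, as the DECLARE_DO block further down does for type testers. A self-contained toy version of the pattern, with invented names:

// Each expansion of the list stamps out one clause per instruction kind.
#define TOY_INSTRUCTION_LIST(V) V(Add) V(Branch) V(Return)

#define DECLARE_IS(type) bool Is##type() const { return false; }
struct ToyInstruction {
  TOY_INSTRUCTION_LIST(DECLARE_IS)  // generates IsAdd(), IsBranch(), IsReturn()
};
#undef DECLARE_IS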
@@ -284,32 +185,34 @@
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream) const;
- virtual void PrintDataTo(StringStream* stream) const { }
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
// Declare virtual type testers.
#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- virtual bool IsControl() const { return false; }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ virtual bool IsControl() const { return false; }
+ virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- void set_result(LOperand* operand) { result_.set(operand); }
- LOperand* result() const { return result_.get(); }
- bool HasResult() const { return result_.is_set(); }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -323,16 +226,98 @@
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
- SetOncePointer<LOperand> result_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
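The new is_call_ / is_save_doubles_ bits replace per-class knowledge in the register allocator; a hedged sketch of how a consumer might read them, using only the accessors declared above (the function itself is invented):

// Sketch, not the actual allocator pass.
void NoteClobbers(LInstruction* instr) {
  if (instr->IsMarkedAsCall()) {
    // treat every allocatable core register as killed across this instruction
  }
  if (instr->IsMarkedAsSaveDoubles()) {
    // double registers are additionally saved and restored around the call
  }
}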
-class LGap: public LInstruction {
+template<typename ElementType, int NumElements>
+class OperandContainer {
+ public:
+ OperandContainer() {
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
+ }
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+ void PrintOperandsTo(StringStream* stream);
+
+ private:
+ ElementType elems_[NumElements];
+};
+
+
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
+ public:
+ int length() { return 0; }
+ void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
+};
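The empty specialization exists because a zero-length member array is not legal standard C++; a self-contained model of the same trick, with stand-in names:

#include <cassert>

template<typename T, int N>
struct Operands {
  Operands() { for (int i = 0; i < N; i++) elems_[i] = T(); }
  int length() const { return N; }
  T& operator[](int i) { assert(i < N); return elems_[i]; }
 private:
  T elems_[N];  // "T elems_[0]" would be ill-formed, hence the specialization
};

template<typename T>
struct Operands<T, 0> {
  int length() const { return 0; }
  T& operator[](int) { static T t = T(); return t; }  // unreachable by contract
};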
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+ OperandContainer<LOperand*, R> results_;
+ OperandContainer<LOperand*, I> inputs_;
+ OperandContainer<LOperand*, T> temps_;
+};
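Under the new scheme a concrete instruction just picks its <R, I, T> shape and fills the containers in its constructor. A hedged example (LExampleAdd is invented, not part of the patch), mirroring the binary-operation classes below:

// One result, two inputs, no temps -- the common binary-operation shape.
class LExampleAdd: public LTemplateInstruction<1, 2, 0> {
 public:
  LExampleAdd(LOperand* left, LOperand* right) {
    inputs_[0] = left;
    inputs_[1] = right;
  }
  LOperand* left() { return inputs_[0]; }
  LOperand* right() { return inputs_[1]; }
};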
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -373,13 +358,13 @@
};
-class LGoto: public LInstruction {
+class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
LGoto(int block_id, bool include_stack_check = false)
: block_id_(block_id), include_stack_check_(include_stack_check) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
@@ -391,7 +376,7 @@
};
-class LLazyBailout: public LInstruction {
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -407,7 +392,7 @@
};
-class LDeoptimize: public LInstruction {
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
@@ -420,7 +405,7 @@
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -435,13 +420,13 @@
};
-class LParameter: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LInstruction {
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -452,96 +437,81 @@
};
-class LUnknownOSRValue: public LInstruction {
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
-class LUnaryOperation: public LInstruction {
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- explicit LUnaryOperation(LOperand* input) : input_(input) { }
+ DECLARE_INSTRUCTION(ControlInstruction)
+ virtual bool IsControl() const { return true; }
- DECLARE_INSTRUCTION(UnaryOperation)
-
- LOperand* input() const { return input_; }
-
- virtual void PrintDataTo(StringStream* stream) const;
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+ void SetBranchTargets(int true_block_id, int false_block_id) {
+ true_block_id_ = true_block_id;
+ false_block_id_ = false_block_id;
+ }
private:
- LOperand* input_;
+ int true_block_id_;
+ int false_block_id_;
};
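Branch targets are no longer constructor arguments; the chunk builder is expected to patch them in after construction through the virtual SetBranchTargets hook declared on LInstruction. A sketch of the assumed call site:

// Assumed wiring after lowering a test-and-branch pair; true_block and
// false_block stand for the hydrogen successors.
LInstruction* instr = new LCmpIDAndBranch(left, right);
instr->SetBranchTargets(true_block->block_id(), false_block->block_id());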
-class LBinaryOperation: public LInstruction {
- public:
- LBinaryOperation(LOperand* left, LOperand* right)
- : left_(left), right_(right) { }
-
- DECLARE_INSTRUCTION(BinaryOperation)
-
- LOperand* left() const { return left_; }
- LOperand* right() const { return right_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* left_;
- LOperand* right_;
-};
-
-
-class LApplyArguments: public LBinaryOperation {
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements)
- : LBinaryOperation(function, receiver),
- length_(length),
- elements_(elements) { }
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- LOperand* function() const { return left(); }
- LOperand* receiver() const { return right(); }
- LOperand* length() const { return length_; }
- LOperand* elements() const { return elements_; }
-
- private:
- LOperand* length_;
- LOperand* elements_;
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
};
-class LAccessArgumentsAt: public LInstruction {
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
- : arguments_(arguments), length_(length), index_(index) { }
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- LOperand* arguments() const { return arguments_; }
- LOperand* length() const { return length_; }
- LOperand* index() const { return index_; }
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* arguments_;
- LOperand* length_;
- LOperand* index_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LArgumentsLength: public LUnaryOperation {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
-class LArgumentsElements: public LInstruction {
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
@@ -549,341 +519,272 @@
};
-class LModI: public LBinaryOperation {
+class LModI: public LTemplateInstruction<1, 2, 0> {
public:
- LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LBinaryOperation {
+class LDivI: public LTemplateInstruction<1, 2, 0> {
public:
- LDivI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LDivI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMulI: public LBinaryOperation {
+class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp)
- : LBinaryOperation(left, right), temp_(temp) { }
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCmpID: public LBinaryOperation {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpID(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LCmpID(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ virtual void PrintDataTo(StringStream* stream);
};
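LCmpID materializes the comparison as a value, while LCmpIDAndBranch consumes it directly as control flow; which one gets emitted presumably depends on how the hydrogen compare is used. A hedged sketch of the choice (the predicate name is invented):

// Sketch of the builder's decision, not actual builder code.
if (UsedOnlyAsBranchCondition(compare)) {          // hypothetical predicate
  return new LCmpIDAndBranch(left, right);         // fused compare-and-branch
}
return DefineAsRegister(new LCmpID(left, right));  // produce a boolean value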
-class LCmpIDAndBranch: public LCmpID {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
public:
- LCmpIDAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpID(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
-};
-
-
-class LUnaryMathOperation: public LUnaryOperation {
- public:
- explicit LUnaryMathOperation(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCmpJSObjectEq: public LBinaryOperation {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) {}
+ LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
};
-class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpJSObjectEq(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
"cmp-jsobject-eq-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LIsNull: public LUnaryOperation {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsNull(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsNull(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull);
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
bool is_strict() const { return hydrogen()->is_strict(); }
};
-
-class LIsNullAndBranch: public LIsNull {
+class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
- LIsNullAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LIsNull(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LIsNullAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ bool is_strict() const { return hydrogen()->is_strict(); }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsObject: public LUnaryOperation {
+class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) {}
+ explicit LIsObject(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LIsObjectAndBranch: public LIsObject {
+class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
- LIsObjectAndBranch(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- int true_block_id,
- int false_block_id)
- : LIsObject(value, temp),
- temp2_(temp2),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- LOperand* temp2() const { return temp2_; }
-
- private:
- LOperand* temp2_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LUnaryOperation {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
-class LIsSmiAndBranch: public LIsSmi {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
- LIsSmiAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LIsSmi(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LUnaryOperation {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LHasInstanceType(LOperand* value)
- : LUnaryOperation(value) { }
+ explicit LHasInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- InstanceType TestType(); // The type to test against when generating code.
- Condition BranchCondition(); // The branch condition for 'true'.
};
-class LHasInstanceTypeAndBranch: public LHasInstanceType {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- LHasInstanceTypeAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LHasInstanceType(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasCachedArrayIndex: public LUnaryOperation {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+ explicit LHasCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
-class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
- LHasCachedArrayIndexAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LHasCachedArrayIndex(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTest: public LUnaryOperation {
+class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {}
+ explicit LClassOfTest(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTestAndBranch: public LClassOfTest {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value,
- LOperand* temporary,
- int true_block_id,
- int false_block_id)
- : LClassOfTest(value),
- temporary_(temporary),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- LOperand* temporary() { return temporary_; }
-
- private:
- LOperand* temporary_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpT: public LBinaryOperation {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
@@ -892,61 +793,48 @@
};
-class LCmpTAndBranch: public LCmpT {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpTAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpT(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpTAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ Token::Value op() const { return hydrogen()->token(); }
};
-class LInstanceOf: public LBinaryOperation {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfAndBranch: public LInstanceOf {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
- LInstanceOfAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LInstanceOf(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LInstanceOfAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LInstanceOfKnownGlobal: public LUnaryOperation {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LInstanceOfKnownGlobal(LOperand* left)
- : LUnaryOperation(left) { }
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
@@ -956,22 +844,27 @@
};
-class LBoundsCheck: public LBinaryOperation {
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
- LBoundsCheck(LOperand* index, LOperand* length)
- : LBinaryOperation(index, length) { }
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
- LOperand* index() const { return left(); }
- LOperand* length() const { return right(); }
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
};
-class LBitI: public LBinaryOperation {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -982,10 +875,13 @@
};
-class LShiftI: public LBinaryOperation {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -999,166 +895,164 @@
};
-class LSubI: public LBinaryOperation {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
- LSubI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LConstant: public LInstruction {
- DECLARE_INSTRUCTION(Constant)
-};
-
-
-class LConstantI: public LConstant {
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantI(int32_t value) : value_(value) { }
- int32_t value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- int32_t value_;
+ int32_t value() const { return hydrogen()->Integer32Value(); }
};
-class LConstantD: public LConstant {
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantD(double value) : value_(value) { }
- double value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- double value_;
+ double value() const { return hydrogen()->DoubleValue(); }
};
-class LConstantT: public LConstant {
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantT(Handle<Object> value) : value_(value) { }
- Handle<Object> value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- Handle<Object> value_;
+ Handle<Object> value() const { return hydrogen()->handle(); }
};
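All three constant classes stop carrying their own copy of the value and read it back from the hydrogen instruction instead, so a constant is stored once in the graph. A rough expansion of what the accessor macro is assumed to provide:

// Assumed shape of DECLARE_HYDROGEN_ACCESSOR(Constant):
HConstant* hydrogen() const {
  return HConstant::cast(hydrogen_value());
}
// ...which lets each accessor read straight from the hydrogen graph, e.g.:
// int32_t value() const { return hydrogen()->Integer32Value(); }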
-class LBranch: public LUnaryOperation {
+class LBranch: public LControlInstruction<1, 0> {
public:
- LBranch(LOperand* input, int true_block_id, int false_block_id)
- : LUnaryOperation(input),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpMapAndBranch: public LUnaryOperation {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
- LOperand* temp() const { return temp_; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
-
- private:
- LOperand* temp_;
};
-class LJSArrayLength: public LUnaryOperation {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
-class LFixedArrayLength: public LUnaryOperation {
+class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LPixelArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+};
+
+
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFixedArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
};
-class LValueOf: public LUnaryOperation {
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
- LValueOf(LOperand* input, LOperand* temporary)
- : LUnaryOperation(input), temporary_(temporary) { }
-
- LOperand* temporary() const { return temporary_; }
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- LOperand* temporary_;
};
-class LThrow: public LUnaryOperation {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LBitNotI: public LUnaryOperation {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
-class LAddI: public LBinaryOperation {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
- LAddI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LArithmeticD: public LBinaryOperation {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -1170,10 +1064,13 @@
};
-class LArithmeticT: public LBinaryOperation {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1185,166 +1082,267 @@
};
-class LReturn: public LUnaryOperation {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LUnaryOperation {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedGeneric: public LUnaryOperation {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() const { return input(); }
+ LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
-class LLoadFunctionPrototype: public LUnaryOperation {
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadFunctionPrototype(LOperand* function)
- : LUnaryOperation(function) { }
+ explicit LLoadFunctionPrototype(LOperand* function) {
+ inputs_[0] = function;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
- LOperand* function() const { return input(); }
+ LOperand* function() { return inputs_[0]; }
};
-class LLoadElements: public LUnaryOperation {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
-class LLoadKeyedFastElement: public LBinaryOperation {
+class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key)
- : LBinaryOperation(elements, key) { }
+ explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+ "load-pixel-array-external-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
- LOperand* elements() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadKeyedGeneric: public LBinaryOperation {
+class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key)
- : LBinaryOperation(obj, key) { }
+ LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+ "load-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
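Pixel arrays (canvas image data) keep raw uint8 storage behind an external pointer, which is why this load takes the pointer and a key rather than a JSObject. A self-contained model of the memory access being lowered, assuming the usual clamped-byte representation:

#include <stdint.h>

// Values are clamped to 0..255 on store, so the load itself is just a byte
// read zero-extended to an untagged int32.
int32_t LoadPixelElement(const uint8_t* external_pointer, int32_t key) {
  return static_cast<int32_t>(external_pointer[key]);
}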
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadGlobal: public LInstruction {
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
};
-class LStoreGlobal: public LUnaryOperation {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+ LStoreGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
};
-class LLoadContextSlot: public LInstruction {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() const {
- return hydrogen()->context_chain_length();
- }
- int slot_index() const { return hydrogen()->slot_index(); }
+ LOperand* context() { return InputAt(0); }
+ int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
-class LPushArgument: public LUnaryOperation {
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
public:
- explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
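StoreContextSlot writes a tagged value into a heap-allocated context, so it may need a GC write barrier, as the needs_write_barrier() accessor above suggests. A hedged sketch of the guarded store (helper name invented):

// Sketch only; RecordSlotWrite is a stand-in for the real write barrier.
void StoreToContextSlot(Object** slots, int slot_index, Object* value,
                        bool needs_write_barrier) {
  slots[slot_index] = value;
  if (needs_write_barrier) {
    // RecordSlotWrite(slots + slot_index, value);  // inform the GC
  }
}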
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
-class LGlobalObject: public LInstruction {
+class LContext: public LTemplateInstruction<1, 0, 0> {
public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
};
-class LGlobalReceiver: public LInstruction {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
-class LCallConstantFunction: public LInstruction {
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- Handle<JSFunction> function() const { return hydrogen()->function(); }
+ Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LInstruction {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LInstruction {
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LInstruction {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1353,44 +1351,46 @@
};
-class LCallGlobal: public LInstruction {
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LInstruction {
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LUnaryOperation {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LInstruction {
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1400,42 +1400,45 @@
};
-class LInteger32ToDouble: public LUnaryOperation {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
-class LNumberTagI: public LUnaryOperation {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagD: public LUnaryOperation {
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2)
- : LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { }
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-
- LOperand* temp1() const { return temp1_; }
- LOperand* temp2() const { return temp2_; }
-
- private:
- LOperand* temp1_;
- LOperand* temp2_;
};
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LUnaryOperation {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+ explicit LDoubleToI(LOperand* value, LOperand* temp1) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
@@ -1445,42 +1448,46 @@
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LUnaryOperation {
+class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
public:
- LTaggedToI(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LTaggedToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LSmiTag: public LUnaryOperation {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
-class LNumberUntagD: public LUnaryOperation {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
};
-class LSmiUntag: public LUnaryOperation {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
- LSmiUntag(LOperand* use, bool needs_check)
- : LUnaryOperation(use), needs_check_(needs_check) { }
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
@@ -1491,23 +1498,21 @@
};
-class LStoreNamed: public LInstruction {
+class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, LOperand* val)
- : object_(obj), value_(val) { }
+ LStoreNamed(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- LOperand* value_;
};
@@ -1522,7 +1527,7 @@
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
};
@@ -1536,23 +1541,21 @@
};
-class LStoreKeyed: public LInstruction {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
- : object_(obj), key_(key), value_(val) { }
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
DECLARE_INSTRUCTION(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
- LOperand* key() const { return key_; }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- LOperand* key_;
- LOperand* value_;
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
@@ -1576,62 +1579,88 @@
};
-class LCheckFunction: public LUnaryOperation {
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
-class LCheckInstanceType: public LUnaryOperation {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckInstanceType(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCheckMap: public LUnaryOperation {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
-class LCheckPrototypeMaps: public LInstruction {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2)
- : temp1_(temp1), temp2_(temp2) { }
+ LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
Handle<JSObject> holder() const { return hydrogen()->holder(); }
-
- LOperand* temp1() const { return temp1_; }
- LOperand* temp2() const { return temp2_; }
-
- private:
- LOperand* temp1_;
- LOperand* temp2_;
};
-class LCheckSmi: public LUnaryOperation {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* use, Condition condition)
- : LUnaryOperation(use), condition_(condition) { }
+ LCheckSmi(LOperand* value, Condition condition)
+ : condition_(condition) {
+ inputs_[0] = value;
+ }
Condition condition() const { return condition_; }
@@ -1645,34 +1674,28 @@
};
-class LMaterializedLiteral: public LInstruction {
- public:
- DECLARE_INSTRUCTION(MaterializedLiteral)
-};
-
-
-class LArrayLiteral: public LMaterializedLiteral {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
-class LObjectLiteral: public LMaterializedLiteral {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
-class LRegExpLiteral: public LMaterializedLiteral {
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LInstruction {
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -1681,61 +1704,79 @@
};
-class LTypeof: public LUnaryOperation {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LUnaryOperation {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit LTypeofIs(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LTypeofIsAndBranch: public LTypeofIs {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
- LTypeofIsAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LTypeofIs(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LDeleteProperty: public LBinaryOperation {
+class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LOsrEntry: public LInstruction {
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry();
@@ -1758,7 +1799,7 @@
};
-class LStackCheck: public LInstruction {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
};
@@ -1769,7 +1810,7 @@
public:
explicit LChunk(HGraph* graph);
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
@@ -1814,8 +1855,6 @@
inlined_closures_.Add(closure);
}
- void Verify() const;
-
private:
int spill_slot_count_;
HGraph* const graph_;
@@ -1837,7 +1876,7 @@
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
- instructions_pending_deoptimization_environment_(NULL),
+ instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
@@ -1872,9 +1911,10 @@
LUnallocated* ToUnallocated(DoubleRegister reg);
// Methods for setting up define-use relationships.
- LOperand* Use(HValue* value, LUnallocated* operand);
- LOperand* UseFixed(HValue* value, Register fixed_register);
- LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register);
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
// A value that is guaranteed to be allocated to a register.
// Operand created by UseRegister is guaranteed to be live until the end of
@@ -1884,37 +1924,53 @@
// instruction start. Register allocator is free to assign the same register
// to some other operand used inside instruction (i.e. temporary or
// output).
- LOperand* UseRegister(HValue* value);
- LOperand* UseRegisterAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
// An input operand in a register that may be trashed.
- LOperand* UseTempRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
// An input operand in a register or stack slot.
- LOperand* Use(HValue* value);
- LOperand* UseAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
// An input operand in a register, stack slot or a constant operand.
- LOperand* UseOrConstant(HValue* value);
- LOperand* UseOrConstantAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
// An input operand in a register or a constant operand.
- LOperand* UseRegisterOrConstant(HValue* value);
- LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- LOperand* UseAny(HValue* value);
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- LInstruction* Define(LInstruction* instr, LUnallocated* result);
- LInstruction* Define(LInstruction* instr);
- LInstruction* DefineAsRegister(LInstruction* instr);
- LInstruction* DefineAsSpilled(LInstruction* instr, int index);
- LInstruction* DefineSameAsFirst(LInstruction* instr);
- LInstruction* DefineFixed(LInstruction* instr, Register reg);
- LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -1927,6 +1983,7 @@
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
LInstruction* SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id);
@@ -1934,11 +1991,6 @@
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
- // Temporary operand that must be in a register.
- LUnallocated* TempRegister();
- LOperand* FixedTemp(Register reg);
- LOperand* FixedTemp(DoubleRegister reg);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
@@ -1958,7 +2010,7 @@
int argument_count_;
LAllocator* allocator_;
int position_;
- LInstruction* instructions_pending_deoptimization_environment_;
+ LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
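
The MUST_USE_RESULT annotations added to the Use*, TempRegister and FixedTemp
helpers above turn a dropped operand into a compile-time warning: an operand
that is allocated but never wired into an instruction would otherwise be lost
silently. A minimal sketch of the effect, assuming a GCC-style toolchain where
the macro expands to __attribute__((warn_unused_result)):

    // Sketch only: V8 defines MUST_USE_RESULT in its own headers; the
    // GCC expansion below is an assumption for illustration.
    #if defined(__GNUC__)
    #define MUST_USE_RESULT __attribute__((warn_unused_result))
    #else
    #define MUST_USE_RESULT
    #endif

    struct LOperand { int index; };

    MUST_USE_RESULT LOperand* UseRegister(int value_id) {
      static LOperand op = { 0 };
      (void)value_id;
      return &op;  // Stand-in for the allocator bookkeeping.
    }

    void BuildInstruction() {
      UseRegister(0);                    // Warning: result ignored -- this
                                         // use never reaches the allocator.
      LOperand* input = UseRegister(0);  // OK: the operand is consumed.
      (void)input;
    }
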
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 55df8b4..057ac24 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -223,7 +223,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -562,17 +562,11 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
- if (instr != NULL) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode);
- RegisterLazyDeoptimization(instr);
- } else {
- LPointerMap no_pointers(0);
- RecordPosition(no_pointers.position());
- __ Call(code, mode);
- RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
- }
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ Call(code, mode);
+ RegisterLazyDeoptimization(instr);
}
@@ -585,15 +579,7 @@
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
- // Runtime calls to Throw are not supposed to ever return at the
- // call site, so don't register lazy deoptimization for these. We do
- // however have to record a safepoint since throwing exceptions can
- // cause garbage collections.
- if (!instr->IsThrow()) {
- RegisterLazyDeoptimization(instr);
- } else {
- RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
- }
+ RegisterLazyDeoptimization(instr);
}
@@ -661,7 +647,7 @@
return;
}
- if (cc == no_condition) {
+ if (cc == al) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@@ -736,37 +722,40 @@
}
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- deoptimization_index);
+ kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegisters(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
}
@@ -774,20 +763,8 @@
LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegistersAndDoubles(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+ deoptimization_index);
}
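
The two deleted bodies above were near-verbatim copies of the same
operand-walking loop; both public entry points now forward to the
kind-parameterized RecordSafepoint. The shape of the consolidation, reduced
to a toy with illustrative stand-in types (not V8's):

    #include <cstddef>
    #include <vector>

    // One worker keyed on a Kind bitmask replaces three hand-copied loops.
    enum Kind { kSimple = 0, kWithRegisters = 1, kWithRegistersAndDoubles = 3 };

    struct Op { bool is_stack_slot; bool is_register; };

    void RecordSafepoint(const std::vector<Op>& ops, Kind kind) {
      for (size_t i = 0; i < ops.size(); ++i) {
        if (ops[i].is_stack_slot) {
          // DefinePointerSlot(...): recorded for every kind.
        } else if (ops[i].is_register && (kind & kWithRegisters)) {
          // DefinePointerRegister(...): only when registers are saved.
        }
      }
      if (kind & kWithRegisters) {
        // Register cp always contains a pointer to the context.
      }
    }

    void RecordSafepointWithRegisters(const std::vector<Op>& ops) {
      RecordSafepoint(ops, kWithRegisters);  // Thin wrapper, as on ARM.
    }
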
@@ -964,7 +941,8 @@
break;
}
case CodeStub::StringCharAt: {
- Abort("StringCharAtStub unimplemented.");
+ StringCharAtStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::MathPow: {
@@ -1015,8 +993,8 @@
LModI* instr_;
};
// These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -1038,6 +1016,34 @@
__ bind(&ok);
}
+ // Try a few common cases before using the generic stub.
+ Label call_stub;
+ const int kUnfolds = 3;
+ // Skip if either side is negative.
+ __ cmp(left, Operand(0));
+ __ cmp(right, Operand(0), NegateCondition(mi));
+ __ b(mi, &call_stub);
+  // If the right hand side is smaller than the (nonnegative)
+  // left hand side, it is the result. Otherwise try a few
+  // subtractions of the right hand side from the left hand side.
+ __ mov(scratch, left);
+ for (int i = 0; i < kUnfolds; i++) {
+    // Check whether the left hand side is less than or equal to
+    // the right hand side.
+ __ cmp(scratch, right);
+ __ mov(result, scratch, LeaveCC, lt);
+ __ b(lt, &done);
+ // If not, reduce the left hand side by the right hand
+ // side and check again.
+ if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
+ }
+
+ // Check for power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub);
+ // Perform modulo operation (scratch contains right - 1).
+ __ and_(result, scratch, Operand(left));
+
+ __ bind(&call_stub);
// Call the generic stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
@@ -1048,8 +1054,8 @@
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
- __ BranchOnNotSmi(result, &deoptimize);
- __ mov(result, Operand(result, ASR, 1));
+ __ JumpIfNotSmi(result, &deoptimize);
+ __ SmiUntag(result);
__ b(al, &done);
__ bind(&deoptimize);
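
The new fast path in DoModI is easier to follow in scalar form. A rough C++
rendering of the same logic (illustrative only; when it returns false the
generated ARM code falls back to the generic stub):

    // Mirrors the unrolled-subtraction and power-of-two shortcuts above.
    bool FastModI(int left, int right, int* result) {
      const int kUnfolds = 3;
      // Skip if either side is negative.
      if (left < 0 || right < 0) return false;
      int scratch = left;
      for (int i = 0; i < kUnfolds; i++) {
        // A nonnegative left smaller than right is already the remainder.
        if (scratch < right) { *result = scratch; return true; }
        // Otherwise reduce by the right hand side and retry (unrolled).
        if (i < kUnfolds - 1) scratch -= right;
      }
      // Power of two on the right hand side: the remainder is a bit mask.
      if (right != 0 && (right & (right - 1)) == 0) {
        *result = left & (right - 1);
        return true;
      }
      return false;  // Generic stub path.
    }
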
@@ -1070,8 +1076,8 @@
LDivI* instr_;
};
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register right = ToRegister(instr->InputAt(1));
const Register scratch = scratch0();
const Register result = ToRegister(instr->result());
@@ -1128,7 +1134,7 @@
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
- __ BranchOnNotSmi(result, &deoptimize);
+ __ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result);
__ b(&done);
@@ -1138,10 +1144,11 @@
}
-void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr,
+template<int T>
+void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
@@ -1150,25 +1157,24 @@
0,
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
- __ str(r0, MemOperand(sp, DwVfpRegister::kNumAllocatableRegisters *
- kDoubleSize));
+ __ StoreToSafepointRegistersAndDoublesSlot(r0);
__ PopSafepointRegistersAndDoubles();
}
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
- Register left = ToRegister(instr->left());
- Register right = EmitLoadRegister(instr->right(), scratch);
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = EmitLoadRegister(instr->InputAt(1), scratch);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->right()->IsConstantOperand()) {
- __ orr(ToRegister(instr->temp()), left, right);
+ !instr->InputAt(1)->IsConstantOperand()) {
+ __ orr(ToRegister(instr->TempAt(0)), left, right);
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// scratch:left = left * right.
- __ smull(scratch, left, left, right);
+ __ smull(left, scratch, left, right);
__ mov(ip, Operand(left, ASR, 31));
__ cmp(ip, Operand(scratch));
DeoptimizeIf(ne, instr->environment());
@@ -1181,13 +1187,13 @@
Label done;
__ tst(left, Operand(left));
__ b(ne, &done);
- if (instr->right()->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ if (instr->InputAt(1)->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
+ DeoptimizeIf(al, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
- __ cmp(ToRegister(instr->temp()), Operand(0));
+ __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
DeoptimizeIf(mi, instr->environment());
}
__ bind(&done);
@@ -1196,8 +1202,8 @@
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
Register result = ToRegister(left);
@@ -1221,8 +1227,8 @@
void LCodeGen::DoShiftI(LShiftI* instr) {
Register scratch = scratch0();
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
Register result = ToRegister(left);
@@ -1279,9 +1285,9 @@
void LCodeGen::DoSubI(LSubI* instr) {
- Register left = ToRegister(instr->left());
- Register right = EmitLoadRegister(instr->right(), ip);
- ASSERT(instr->left()->Equals(instr->result()));
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = EmitLoadRegister(instr->InputAt(1), ip);
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
__ sub(left, left, right, SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(vs, instr->environment());
@@ -1311,22 +1317,29 @@
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
+void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
+}
+
+
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
}
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temporary());
+ Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
Label done;
@@ -1344,14 +1357,14 @@
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
__ mvn(ToRegister(input), Operand(ToRegister(input)));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ push(input_reg);
CallRuntime(Runtime::kThrow, 1, instr);
@@ -1362,8 +1375,8 @@
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
Register right_reg = EmitLoadRegister(right, ip);
@@ -1376,8 +1389,8 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
switch (instr->op()) {
case Token::ADD:
__ vadd(left, left, right);
@@ -1392,7 +1405,18 @@
__ vdiv(left, left, right);
break;
case Token::MOD: {
- Abort("DoArithmeticD unimplemented for MOD.");
+ // Save r0-r3 on the stack.
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
+
+ __ PrepareCallCFunction(4, scratch0());
+ __ vmov(r0, r1, left);
+ __ vmov(r2, r3, right);
+ __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+      // Move the result into the double result register.
+ __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+
+ // Restore r0-r3.
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
break;
}
default:
@@ -1403,8 +1427,8 @@
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
+ ASSERT(ToRegister(instr->InputAt(0)).is(r1));
+ ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
// TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
@@ -1448,11 +1472,11 @@
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, nz);
+ EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->input());
+ DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
@@ -1461,7 +1485,7 @@
EmitBranch(true_block, false_block, ne);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
@@ -1508,7 +1532,7 @@
__ CallStub(&stub);
__ cmp(reg, Operand(0));
__ ldm(ia_w, sp, saved_regs);
- EmitBranch(true_block, false_block, nz);
+ EmitBranch(true_block, false_block, ne);
}
}
}
@@ -1560,7 +1584,7 @@
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
+ Condition cond = kNoCondition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
@@ -1588,40 +1612,84 @@
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- __ cmp(ToRegister(left), ToOperand(right));
- Abort("EmitCmpI untested.");
+ __ cmp(ToRegister(left), ToRegister(right));
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("DoCmpID unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+ Register scratch = scratch0();
+
+ Label unordered, done;
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to unordered to return false.
+ __ b(vs, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ b(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("DoCmpIDAndBranch unimplemented.");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Compare left and right as doubles and load the
+ // resulting flags into the normal status register.
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ b(vs, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- Abort("DoCmpJSObjectEq untested.");
}
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Abort("DoCmpJSObjectEqAndBranch unimplemented.");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmp(left, Operand(right));
+ EmitBranch(true_block, false_block, eq);
}
void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
__ LoadRoot(ip, Heap::kNullValueRootIndex);
@@ -1656,7 +1724,7 @@
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
@@ -1692,25 +1760,69 @@
Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("EmitIsObject unimplemented.");
- return ne;
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp1, Heap::kNullValueRootIndex);
+ __ cmp(input, temp1);
+ __ b(eq, is_object);
+
+ // Load map.
+ __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ tst(temp2, Operand(1 << Map::kIsUndetectable));
+ __ b(ne, is_not_object);
+
+ // Load instance type and check that it is in object type range.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, is_not_object);
+ __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ return le;
}
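
EmitIsObject above encodes the full typeof-style object test. The same
decision sequence as a plain predicate; the type-range bounds are passed in
because the real FIRST_/LAST_JS_OBJECT_TYPE values live in V8's
instance-type enum:

    // Booleans stand in for the smi, null and map-bit checks done above.
    bool IsObjectForTest(bool is_smi, bool is_null, bool is_undetectable,
                         int instance_type,
                         int first_js_object_type, int last_js_object_type) {
      if (is_smi) return false;           // JumpIfSmi(input, is_not_object).
      if (is_null) return true;           // null compares equal: is_object.
      if (is_undetectable) return false;  // Map::kIsUndetectable bit set.
      return instance_type >= first_js_object_type &&  // b(lt, is_not_object)
             instance_type <= last_js_object_type;     // returned 'le'.
    }
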
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("DoIsObject unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register temp = scratch0();
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ __ b(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("DoIsObjectAndBranch unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ tst(input_reg, Operand(kSmiTagMask));
__ LoadRoot(result, Heap::kTrueValueRootIndex);
Label done;
@@ -1724,24 +1836,24 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ tst(input_reg, Operand(kSmiTagMask));
EmitBranch(true_block, false_block, eq);
}
-InstanceType LHasInstanceType::TestType() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
}
-Condition LHasInstanceType::BranchCondition() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == to) return eq;
if (to == LAST_TYPE) return hs;
if (from == FIRST_TYPE) return ls;
@@ -1751,13 +1863,25 @@
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Abort("DoHasInstanceType unimplemented.");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label done;
+ __ tst(input, Operand(kSmiTagMask));
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
+ __ b(eq, &done);
+ __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
+ Condition cond = BranchCondition(instr->hydrogen());
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
+ __ bind(&done);
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1767,8 +1891,8 @@
__ tst(input, Operand(kSmiTagMask));
__ b(eq, false_label);
- __ CompareObjectType(input, scratch, scratch, instr->TestType());
- EmitBranch(true_block, false_block, instr->BranchCondition());
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
@@ -1841,7 +1965,7 @@
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Handle<String> class_name = instr->hydrogen()->class_name();
@@ -1862,9 +1986,9 @@
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
- Register temp2 = ToRegister(instr->temporary());
+ Register temp2 = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1880,8 +2004,8 @@
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -1892,8 +2016,8 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1906,12 +2030,133 @@
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- Abort("DoInstanceOfAndBranch unimplemented.");
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ tst(r0, Operand(r0));
+ EmitBranch(true_block, false_block, eq);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Abort("DoInstanceOfKnownGlobal unimplemented.");
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(object.is(r0));
+ ASSERT(result.is(r0));
+
+  // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation, so that the instruction can later be
+  // patched with the cached map.
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ __ cmp(map, Operand(ip));
+ __ b(ne, &cache_miss);
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation, so that the instruction can later be
+  // patched with true or false.
+ __ mov(result, Operand(Factory::the_hole_value()));
+ __ b(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+  // Null is not an instance of anything.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(object, Operand(ip));
+ __ b(eq, &false_result);
+
+  // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp);
+ __ b(is_string, &false_result);
+
+ // Go to the deferred code.
+ __ b(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(r0));
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(flags);
+
+ __ PushSafepointRegisters();
+
+  // Get the temp register reserved by the instruction. This needs to be r4,
+  // because its safepoint-register slot is used to communicate the offset
+  // to the location of the map check.
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(temp.is(r4));
+ __ mov(InstanceofStub::right(), Operand(instr->function()));
+ static const int kAdditionalDelta = 4;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(kAdditionalDelta);
+ __ mov(temp, Operand(delta * kPointerSize));
+ __ StoreToSafepointRegisterSlot(temp);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT_EQ(kAdditionalDelta,
+ masm_->InstructionsGeneratedSince(&before_push_delta));
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Put the result value into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(result);
+
+ __ PopSafepointRegisters();
}
@@ -1930,7 +2175,7 @@
return ge;
default:
UNREACHABLE();
- return no_condition;
+ return kNoCondition;
}
}
@@ -1940,12 +2185,12 @@
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
- __ cmp(r0, Operand(0));
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@@ -1956,7 +2201,21 @@
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Abort("DoCmpTAndBranch unimplemented.");
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ // The compare stub expects compare condition and the input operands
+ // reversed for GT and LTE.
+ Condition condition = ComputeCompareCondition(op);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ __ cmp(r0, Operand(0));
+ EmitBranch(true_block, false_block, condition);
}
@@ -1988,22 +2247,54 @@
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
- Register value = ToRegister(instr->input());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ Register value = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ // Load the cell.
+ __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->check_hole_value()) {
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ __ ldr(scratch2,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Store the value.
+ __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- // TODO(antonm): load a context with a separate instruction.
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadContext(result, instr->context_chain_length());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ ldr(result, ContextOperand(result, instr->slot_index()));
}
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ ldr(context,
+ MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(value, ContextOperand(context, instr->slot_index()));
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, Operand(offset), value, scratch0());
+ }
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->input());
+ Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2070,17 +2361,20 @@
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- ASSERT(instr->result()->Equals(instr->input()));
- Register reg = ToRegister(instr->input());
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
- __ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset));
+ __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done;
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
+ __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+ __ cmp(scratch, ip);
+ __ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ Check(eq, "Check for fast elements failed.");
@@ -2089,6 +2383,14 @@
}
+void LCodeGen::DoLoadPixelArrayExternalPointer(
+ LLoadPixelArrayExternalPointer* instr) {
+ Register to_reg = ToRegister(instr->result());
+ Register from_reg = ToRegister(instr->InputAt(0));
+ __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
+}
+
+
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
@@ -2125,6 +2427,16 @@
}
+void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
+ Register external_elements = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+
+ // Load the result.
+ __ ldrb(result, MemOperand(external_elements, key));
+}
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -2152,7 +2464,7 @@
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->input());
+ Register elem = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label done;
@@ -2176,29 +2488,33 @@
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register scratch = scratch0();
-
- ASSERT(receiver.is(r0));
- ASSERT(function.is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // If the receiver is null or undefined, we have to pass the
- // global object as a receiver.
- Label global_receiver, receiver_ok;
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_receiver);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(ne, &receiver_ok);
- __ bind(&global_receiver);
- __ ldr(receiver, GlobalObjectOperand());
- __ bind(&receiver_ok);
-
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(r0)); // Used for parameter count.
+ ASSERT(function.is(r1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(r0));
- Label invoke;
+ // If the receiver is null or undefined, we have to pass the global object
+ // as a receiver.
+ Label global_object, receiver_ok;
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ b(eq, &global_object);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ b(eq, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ tst(receiver, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
+ DeoptimizeIf(lo, instr->environment());
+ __ jmp(&receiver_ok);
+
+ __ bind(&global_object);
+ __ ldr(receiver, GlobalObjectOperand());
+ __ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -2215,7 +2531,7 @@
// Loop through the arguments pushing them onto the execution
// stack.
- Label loop;
+ Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ tst(length, Operand(length));
__ b(eq, &invoke);
@@ -2226,18 +2542,24 @@
__ b(ne, &loop);
__ bind(&invoke);
- // Invoke the function. The number of arguments is stored in receiver
- // which is r0, as expected by InvokeFunction.
- v8::internal::ParameterCount actual(receiver);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
- instr->pointer_map(),
- Safepoint::kNoDeoptimizationIndex);
+ pointers,
+ env->deoptimization_index());
+ // The number of arguments is stored in receiver which is r0, as expected
+ // by InvokeFunction.
+ v8::internal::ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
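
The rewritten receiver fix-up is stricter than before: null and undefined
still become the global object, but any other non-JSObject receiver now
deoptimizes instead of being passed through. The decision table, with
booleans standing in for the smi and instance-type checks done in assembly:

    enum ReceiverAction { kUseGlobalObject, kKeepReceiver, kDeoptimize };

    ReceiverAction ClassifyReceiver(bool is_null_or_undefined,
                                    bool is_smi,
                                    bool is_js_object) {
      if (is_null_or_undefined) return kUseGlobalObject;  // b(eq, &global_object)
      if (is_smi || !is_js_object) return kDeoptimize;    // DeoptimizeIf(eq/lo)
      return kKeepReceiver;                               // receiver_ok
    }
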
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->input();
+ LOperand* argument = instr->InputAt(0);
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
Abort("DoPushArgument not implemented for double type.");
} else {
@@ -2247,16 +2569,32 @@
}
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ ldr(result,
+ MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2301,43 +2639,166 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| would be restored
+ // unchanged by popping safepoint registers.
+ __ tst(exponent, Operand(HeapNumber::kSignMask));
+ __ b(eq, &done);
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ __ PushSafepointRegisters();
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r1) ? r0 : r1;
+ Register tmp2 = input.is(r2) ? r0 : r2;
+ Register tmp3 = input.is(r3) ? r0 : r3;
+ Register tmp4 = input.is(r4) ? r0 : r4;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+  // Restore input after the runtime call.
+ __ LoadFromSafepointRegisterSlot(input);
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+ __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ str(tmp1, masm()->SafepointRegisterSlot(input));
+ __ PopSafepointRegisters();
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ __ cmp(input, Operand(0));
+ // We can make rsb conditional because the previous cmp instruction
+ // will clear the V (overflow) flag and rsb won't set this flag
+ // if input is positive.
+ __ rsb(input, input, Operand(0), SetCC, mi);
+ // Deoptimize on overflow.
+ DeoptimizeIf(vs, instr->environment());
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- Abort("DoMathAbs unimplemented.");
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
+ __ vabs(input, input);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+// Truncates a double using a specific rounding mode.
+// Clears the Z flag (ne condition) if a VFP exception is raised during
+// the conversion, e.g. on overflow.
+void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2) {
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
+
+  // Set custom FPSCR:
+ // - Set rounding mode.
+ // - Clear vfp cumulative exception flags.
+ // - Make sure Flush-to-zero mode control bit is unset.
+ __ vmrs(prev_fpscr);
+ __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ __ orr(scratch, scratch, Operand(rounding_mode));
+ __ vmsr(scratch);
+
+ // Convert the argument to an integer.
+ __ vcvt_s32_f64(result,
+ double_input,
+ kFPSCRRounding);
+
+ // Retrieve FPSCR.
+ __ vmrs(scratch);
+ // Restore FPSCR.
+ __ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
+ __ tst(scratch, Operand(kVFPExceptionMask));
}
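
EmitVFPTruncate saves FPSCR, installs the requested rounding mode, converts,
and then inspects the cumulative exception bits. A rough portable sketch of
the same contract using <cfenv> (not V8 code; fenv-access pragmas are
omitted, so exact behavior is toolchain-dependent):

    #include <cfenv>
    #include <cmath>

    // Convert a double to an integer under an explicit rounding mode and
    // report whether the conversion was exact and in range -- the analogue
    // of testing kVFPExceptionMask in FPSCR afterwards.
    bool ConvertWithRounding(double input, int rounding_mode, long* result) {
      const int prev = std::fegetround();  // Save caller's rounding mode.
      std::feclearexcept(FE_ALL_EXCEPT);   // Clear cumulative flags first.
      std::fesetround(rounding_mode);      // FE_DOWNWARD ~ kRoundToMinusInf.
      *result = std::lrint(input);         // Rounds per the current mode.
      bool ok =
          std::fetestexcept(FE_INVALID | FE_OVERFLOW | FE_INEXACT) == 0;
      std::fesetround(prev);               // Restore, like vmsr(prev_fpscr).
      return ok;
    }
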
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->input());
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register prev_fpscr = ToRegister(instr->temp());
SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch = scratch0();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
- // Set custom FPCSR:
- // - Set rounding mode to "Round towards Minus Infinity".
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- __ vmrs(prev_fpscr);
- __ bic(scratch, prev_fpscr,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
- __ vmsr(scratch);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(single_scratch,
+ EmitVFPTruncate(kRoundToMinusInf,
+ single_scratch,
input,
- Assembler::FPSCRRounding,
- al);
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(scratch);
- // Restore FPSCR
- __ vmsr(prev_fpscr);
- __ tst(scratch, Operand(kVFPExceptionMask));
+ scratch1,
+ scratch2);
DeoptimizeIf(ne, instr->environment());
// Move the result back to general purpose register r0.
@@ -2347,15 +2808,15 @@
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->input());
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input));
__ vsqrt(input, input);
}
@@ -2431,7 +2892,7 @@
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->input()).is(r1));
+ ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
@@ -2483,7 +2944,9 @@
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(info_->is_strict()
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2530,8 +2993,157 @@
}
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register scratch = scratch0();
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ Label flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
+
+ // Handle non-flat strings.
+ __ tst(result, Operand(kIsConsStringMask));
+ __ b(eq, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case, we would rather go to the runtime system now to flatten
+ // the string.
+ __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(scratch, ip);
+ __ b(ne, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(ne, deferred->entry());
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ tst(result, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ ldrh(result,
+ FieldMemOperand(string,
+ SeqTwoByteString::kHeaderSize + 2 * const_index));
+ } else {
+ __ add(scratch,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ ldrh(result, MemOperand(scratch, index, LSL, 1));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ ldrb(result, FieldMemOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ add(scratch,
+ string,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ ldrb(result, MemOperand(scratch, index));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
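
The two load paths above share one address computation and differ only in element width and in whether the index is known at compile time. A minimal C++ sketch of that arithmetic (header_size stands in for SeqTwoByteString::kHeaderSize or SeqAsciiString::kHeaderSize; kHeapObjectTag is 1 in this tagging scheme):

    #include <cstdint>

    // Tagged pointers point one byte past the real object start, so the tag
    // is subtracted before adding the header offset and the scaled index.
    inline uintptr_t CharAddress(uintptr_t tagged_string, int header_size,
                                 int index, int char_size) {
      const int kHeapObjectTag = 1;
      return tagged_string - kHeapObjectTag + header_size + index * char_size;
    }
    // The ldrh path uses char_size == 2, the ldrb path char_size == 1.
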
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(0));
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ mov(scratch, Operand(Smi::FromInt(const_index)));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(r0);
+ }
+ __ SmiUntag(r0);
+ MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
+ __ str(r0, result_stack_slot);
+ __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -2557,7 +3169,7 @@
LNumberTagI* instr_;
};
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -2570,7 +3182,7 @@
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
DoubleRegister dbl_scratch = d0;
SwVfpRegister flt_scratch = s0;
@@ -2627,11 +3239,11 @@
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->input());
+ DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
@@ -2664,7 +3276,7 @@
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
@@ -2672,7 +3284,7 @@
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ tst(ToRegister(input), Operand(kSmiTagMask));
@@ -2739,11 +3351,11 @@
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done;
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
DoubleRegister dbl_scratch = d0;
SwVfpRegister flt_scratch = s0;
- DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
+ DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0));
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -2800,7 +3412,7 @@
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -2820,7 +3432,7 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -2833,12 +3445,47 @@
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("DoDoubleToI unimplemented.");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ DoubleRegister double_input = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
+
+ VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
+ : kRoundToNearest;
+
+ EmitVFPTruncate(rounding_mode,
+ single_scratch,
+ double_input,
+ scratch1,
+ scratch2);
+ // Deoptimize if we had a vfp invalid exception.
+ DeoptimizeIf(ne, instr->environment());
+ // Retrieve the result.
+ __ vmov(result_reg, single_scratch);
+
+ if (instr->truncating() &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmp(result_reg, Operand(0));
+ __ b(ne, &done);
+ // Check for -0.
+ __ vmov(scratch1, double_input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+
+ __ bind(&done);
+ }
}
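
The -0 check exploits the IEEE 754 layout: -0.0 and +0.0 differ only in the sign bit, which sits in the upper word read by double_input.high(). A standalone sketch, assuming kSignMask is the conventional 0x80000000:

    #include <cstdint>
    #include <cstring>

    // True exactly for -0.0: compares equal to zero, sign bit set.
    inline bool IsMinusZero(double d) {
      const uint32_t kSignMask = 0x80000000u;  // assumed HeapNumber::kSignMask
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      const uint32_t high = static_cast<uint32_t>(bits >> 32);
      return d == 0.0 && (high & kSignMask) != 0;
    }
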
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
__ tst(ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(instr->condition(), instr->environment());
@@ -2846,7 +3493,7 @@
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
@@ -2870,8 +3517,8 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->input()->IsRegister());
- Register reg = ToRegister(instr->input());
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(instr->hydrogen()->target()));
DeoptimizeIf(ne, instr->environment());
}
@@ -2879,7 +3526,7 @@
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Register scratch = scratch0();
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -2902,8 +3549,8 @@
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -3033,7 +3680,7 @@
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = !instr->hydrogen()->pretenure();
+ bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ mov(r1, Operand(shared_info));
@@ -3051,14 +3698,14 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
@@ -3081,7 +3728,7 @@
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -3100,7 +3747,7 @@
Label* false_label,
Register input,
Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
+ Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
if (type_name->Equals(Heap::number_symbol())) {
__ tst(input, Operand(kSmiTagMask));
@@ -3178,6 +3825,55 @@
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register result = ToRegister(instr->result());
+ Label true_label;
+ Label false_label;
+ Label done;
+
+ EmitIsConstructCall(result, scratch0());
+ __ b(eq, &true_label);
+
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp1, scratch0());
+ EmitBranch(true_block, false_block, eq);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+ ASSERT(!temp1.is(temp2));
+ // Get the frame pointer for the calling frame.
+ __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &check_frame_marker);
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+ __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
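
EmitIsConstructCall is a two-hop frame walk; the same logic as a freestanding sketch, with a hypothetical struct standing in for the raw frame layout and Smi-encoded marker values:

    #include <cstdint>

    struct Frame {          // hypothetical view of a stack frame
      Frame* caller_fp;     // slot at StandardFrameConstants::kCallerFPOffset
      intptr_t context;     // slot at kContextOffset
      intptr_t marker;      // slot at kMarkerOffset
    };

    const intptr_t kAdaptorSmi = 4 << 1;    // stand-in for Smi(ARGUMENTS_ADAPTOR)
    const intptr_t kConstructSmi = 2 << 1;  // stand-in for Smi(CONSTRUCT)

    // Step to the caller, hop over an arguments adaptor frame if present,
    // then compare the frame marker against CONSTRUCT.
    bool IsConstructCall(const Frame* fp) {
      const Frame* caller = fp->caller_fp;
      if (caller->context == kAdaptorSmi) caller = caller->caller_fp;
      return caller->marker == kConstructSmi;
    }
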
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -3185,7 +3881,7 @@
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(al, instr->environment());
}
@@ -3193,10 +3889,14 @@
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(object, key);
- RecordPosition(instr->pointer_map()->position());
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
- instr->pointer_map(),
- Safepoint::kNoDeoptimizationIndex);
+ pointers,
+ env->deoptimization_index());
__ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
}
@@ -3214,7 +3914,19 @@
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- Abort("DoOsrEntry unimplemented.");
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 9eed64b..7bc6689 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -93,12 +93,17 @@
void FinishCode(Handle<Code> code);
// Deferred code support.
- void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
+ template<int T>
+ void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -212,11 +217,21 @@
MemOperand ToMemOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
+ void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -249,6 +264,10 @@
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsConstructCall().
+ // The caller should branch on the equal condition.
+ void EmitIsConstructCall(Register temp1, Register temp2);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 1028b0e..5d8df1a 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -318,7 +318,7 @@
CheckConstPool(true, true);
add(pc, pc, Operand(index,
LSL,
- assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+ Instruction::kInstrSizeLog2 - kSmiTagSize));
BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@
void MacroAssembler::InNewSpace(Register object,
Register scratch,
- Condition cc,
+ Condition cond,
Label* branch) {
- ASSERT(cc == eq || cc == ne);
+ ASSERT(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
- b(cc, branch);
+ b(cond, branch);
}
@@ -485,6 +485,21 @@
PopSafepointRegisters();
}
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
+ str(reg, SafepointRegistersAndDoublesSlot(reg));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
+ str(reg, SafepointRegisterSlot(reg));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
+ ldr(reg, SafepointRegisterSlot(reg));
+}
+
+
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -493,6 +508,19 @@
}
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // The general purpose registers are pushed before the doubles, so their
+ // slots lie above the doubles save area.
+ int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
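
Since the doubles area sits between sp and the general purpose register slots, a GPR slot is sp plus the entire doubles area plus the scaled safepoint index. With illustrative values only (say 15 allocatable double registers, kDoubleSize = 8, kPointerSize = 4), safepoint index 2 lands at:

    // doubles_size    = 15 * 8 = 120
    // register_offset = 2 * 4  = 8
    // slot address    = sp + 120 + 8 = sp + 128
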
+
+
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
@@ -604,38 +632,21 @@
}
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- // r0 is argc.
- // Compute callee's stack pointer before making changes and save it as
- // ip register so that it is restored as sp register on exit, thereby
- // popping the args.
-
- // ip = sp + kPointerSize * #args;
- add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Compute the argv pointer and keep it in a callee-saved register.
- sub(r6, ip, Operand(kPointerSize));
-
- // Prepare the stack to be aligned when calling into C. After this point there
- // are 5 pushes before the call into C, so the stack needs to be aligned after
- // 5 pushes.
- int frame_alignment = ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment != kPointerSize) {
- // The following code needs to be more general if this assert does not hold.
- ASSERT(frame_alignment == 2 * kPointerSize);
- // With 5 pushes left the frame must be unaligned at this point.
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
- push(r7, eq); // Push if aligned to make it unaligned.
- }
-
- // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
- stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+ // Setup the frame structure on the stack.
+ ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ Push(lr, fp);
mov(fp, Operand(sp)); // Setup new frame pointer.
-
+ // Reserve room for saved entry sp and code object.
+ sub(sp, sp, Operand(2 * kPointerSize));
+ if (FLAG_debug_code) {
+ mov(ip, Operand(0));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
mov(ip, Operand(CodeObject()));
- push(ip); // Accessed from ExitFrame::code_slot.
+ str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -643,31 +654,32 @@
mov(ip, Operand(ExternalReference(Top::k_context_address)));
str(cp, MemOperand(ip));
- // Setup argc and the builtin function in callee-saved registers.
- mov(r4, Operand(r0));
- mov(r5, Operand(r1));
-
// Optionally save all double registers.
if (save_doubles) {
- // TODO(regis): Use vstrm instruction.
- // The stack alignment code above made sp unaligned, so add space for one
- // more double register and use aligned addresses.
- ASSERT(kDoubleSize == frame_alignment);
- // Mark the frame as containing doubles by pushing a non-valid return
- // address, i.e. 0.
- ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
- mov(ip, Operand(0)); // Marker and alignment word.
- push(ip);
- int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
- sub(sp, sp, Operand(space));
+ sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, sp, i * kDoubleSize + kPointerSize);
+ vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
- // Note that d0 will be accessible at fp - 2*kPointerSize -
- // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
- // alignment word were pushed after the fp.
+ // Note that d0 will be accessible at
+ // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // since the sp slot and code slot were pushed after the fp.
}
+
+ // Reserve space for the return address and the requested stack space, and
+ // align the frame in preparation for calling the runtime function.
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ add(ip, sp, Operand(kPointerSize));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
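
After EnterExitFrame the frame looks roughly as below (offsets relative to the new fp; the kSPOffset/kCodeOffset placements are assumptions consistent with the -2 * kPointerSize base used for the double saves):

    //   fp + 4 : saved lr (caller pc)
    //   fp + 0 : saved caller fp        <- fp points here
    //   fp - 4 : exit-frame sp slot     (assumed ExitFrameConstants::kSPOffset)
    //   fp - 8 : code object slot       (assumed ExitFrameConstants::kCodeOffset)
    //   below  : optional d0..dN save area, then stack_space + 1 words, with sp
    //            finally aligned down to ActivationFrameAlignment().
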
@@ -705,12 +717,10 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers.
if (save_doubles) {
- // TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- // Register d15 is just below the marker.
- const int offset = ExitFrameConstants::kMarkerOffset;
- vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+ const int offset = -2 * kPointerSize;
+ vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
}
@@ -726,9 +736,12 @@
str(r3, MemOperand(ip));
#endif
- // Pop the arguments, restore registers, and return.
- mov(sp, Operand(fp)); // respect ABI stack constraint
- ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+ // Tear down the exit frame, pop the arguments, and return. Callee-saved
+ // register r4 still holds argc.
+ mov(sp, Operand(fp));
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ mov(pc, lr);
}
@@ -923,7 +936,7 @@
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsNotStringMask));
- b(nz, fail);
+ b(ne, fail);
}
@@ -1137,7 +1150,8 @@
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg));
+ add(scratch2, result, Operand(obj_size_reg), SetCC);
+ b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
str(scratch2, MemOperand(topaddr));
@@ -1216,10 +1230,11 @@
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2));
+ add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
- add(scratch2, result, Operand(object_size));
+ add(scratch2, result, Operand(object_size), SetCC);
}
+ b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
@@ -1382,7 +1397,7 @@
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
- BranchOnSmi(obj, fail);
+ JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map));
@@ -1397,7 +1412,7 @@
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
- BranchOnSmi(obj, fail);
+ JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(ip, index);
@@ -1411,7 +1426,7 @@
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
- BranchOnSmi(function, miss);
+ JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@@ -1454,17 +1469,115 @@
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+ ApiFunction* function, int stack_space) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(),
+ next_address);
+
+ // Allocate HandleScope in callee-save registers.
+ mov(r7, Operand(next_address));
+ ldr(r4, MemOperand(r7, kNextOffset));
+ ldr(r5, MemOperand(r7, kLimitOffset));
+ ldr(r6, MemOperand(r7, kLevelOffset));
+ add(r6, r6, Operand(1));
+ str(r6, MemOperand(r7, kLevelOffset));
+
+ // The native call returns to the DirectCEntry stub, which redirects to the
+ // return address pushed on the stack (it could have moved after a GC).
+ // The DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, function);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ // If the result is non-zero, dereference it to get the result value;
+ // otherwise set it to undefined.
+ cmp(r0, Operand(0));
+ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ ldr(r0, MemOperand(r0), ne);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ str(r4, MemOperand(r7, kNextOffset));
+ if (FLAG_debug_code) {
+ ldr(r1, MemOperand(r7, kLevelOffset));
+ cmp(r1, r6);
+ Check(eq, "Unexpected level after return from api call");
+ }
+ sub(r6, r6, Operand(1));
+ str(r6, MemOperand(r7, kLevelOffset));
+ ldr(ip, MemOperand(r7, kLimitOffset));
+ cmp(r5, ip);
+ b(ne, &delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+ ldr(r5, MemOperand(ip));
+ cmp(r4, r5);
+ b(ne, &promote_scheduled_exception);
+
+ // LeaveExitFrame expects unwind space to be in r4.
+ mov(r4, Operand(stack_space));
+ LeaveExitFrame(false);
+
+ bind(&promote_scheduled_exception);
+ MaybeObject* result = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ str(r5, MemOperand(r7, kLimitOffset));
+ mov(r4, r0);
+ PrepareCallCFunction(0, r5);
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+ mov(r0, r4);
+ jmp(&leave_exit_frame);
+
+ return result;
+}
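
The r4/r5/r6 bookkeeping above is the usual HandleScope enter/leave protocol. A compact sketch of the assumed semantics of the exit path:

    struct HandleScopeData { void** next; void** limit; int level; };

    // Restore next and drop level; if limit moved while the callback ran,
    // extensions were allocated and must be deleted after resetting it.
    void LeaveScope(HandleScopeData* d, void** saved_next, void** saved_limit,
                    void (*delete_extensions)()) {
      d->next = saved_next;
      d->level--;
      if (d->limit != saved_limit) {
        d->limit = saved_limit;   // str(r5, MemOperand(r7, kLimitOffset))
        delete_extensions();
      }
    }
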
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -1510,7 +1623,7 @@
Label done;
if ((flags & OBJECT_NOT_SMI) == 0) {
Label not_smi;
- BranchOnNotSmi(object, ¬_smi);
+ JumpIfNotSmi(object, ¬_smi);
// Remove smi tag and convert to double.
mov(scratch1, Operand(object, ASR, kSmiTagSize));
vmov(scratch3, scratch1);
@@ -1556,13 +1669,14 @@
Register dest,
Register scratch,
Register scratch2,
+ DwVfpRegister double_scratch,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
- vldr(d0, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(s0, d0);
- vmov(dest, s0);
+ vldr(double_scratch, scratch, HeapNumber::kValueOffset);
+ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ vmov(dest, double_scratch.low());
// Signed vcvt instruction will saturate to the minimum (0x80000000) or
// maximum (0x7fffffff) signed 32-bit integer when the double is out of
// range. When subtracting one, the minimum signed integer becomes the
@@ -1659,6 +1773,13 @@
}
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+ Register src,
+ int num_least_bits) {
+ and_(dst, src, Operand((1 << num_least_bits) - 1));
+}
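
(1 << num_least_bits) - 1 is the standard n-bit mask, so for non-negative inputs this is a modulus by a power of two: with num_least_bits = 3 the mask is 0b111, and src = 29 gives dst = 29 & 7 = 5.
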
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1718,6 +1839,17 @@
}
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ return TryJumpToExternalReference(ext);
+}
+
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
@@ -1736,6 +1868,18 @@
}
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+ const ExternalReference& builtin) {
+#if defined(__thumb__)
+ // Thumb mode builtin.
+ ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+ mov(r1, Operand(builtin));
+ CEntryStub stub(1);
+ return TryTailCallStub(&stub);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator) {
@@ -1803,9 +1947,9 @@
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
if (FLAG_debug_code)
- Check(cc, msg);
+ Check(cond, msg);
}
@@ -1838,9 +1982,9 @@
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
Label L;
- b(cc, &L);
+ b(cond, &L);
Abort(msg);
// will not return here
bind(&L);
@@ -1933,10 +2077,21 @@
}
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero) {
+ sub(scratch, reg, Operand(1), SetCC);
+ b(mi, not_power_of_two_or_zero);
+ tst(scratch, reg);
+ b(ne, not_power_of_two_or_zero);
+}
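
The sub/tst pair is the classic x & (x - 1) test. A freestanding equivalent, as a sketch rather than V8 code:

    #include <cstdint>

    // b(mi, ...) after the SetCC subtraction catches reg == 0 (and negative
    // inputs); tst scratch, reg is non-zero unless reg has a single bit set.
    inline bool IsPowerOfTwoNonZero(int32_t reg) {
      const int32_t scratch = reg - 1;
      if (scratch < 0) return false;   // reg was zero or negative
      return (reg & scratch) == 0;
    }
    // IsPowerOfTwoNonZero(8) -> true; IsPowerOfTwoNonZero(6) -> false (6 & 5 == 4).
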
+
+
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi);
@@ -1946,7 +2101,7 @@
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi);
@@ -1954,12 +2109,40 @@
void MacroAssembler::AbortIfSmi(Register object) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi");
}
+void MacroAssembler::AbortIfNotSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Assert(eq, "Operand is not smi");
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(ip));
+ LoadRoot(ip, root_value_index);
+ cmp(src, ip);
+ Assert(eq, message);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ cmp(scratch, heap_number_map);
+ b(ne, on_not_heap_number);
+}
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -1986,7 +2169,7 @@
Register scratch2,
Label* failure) {
// Check that neither is a smi.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask));
b(eq, failure);
@@ -2063,7 +2246,7 @@
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(zeros));
+ ASSERT(!zeros.is(source) || !source.is(scratch));
ASSERT(!zeros.is(scratch));
ASSERT(!scratch.is(ip));
ASSERT(!source.is(ip));
@@ -2185,7 +2368,26 @@
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+ const int32_t kPCRegOffset = 2 * kPointerSize;
+ ldr(result, MemOperand(ldr_location));
+ if (FLAG_debug_code) {
+ // Check that the instruction is a ldr reg, [pc + offset].
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, "The instruction to patch should be a load from pc.");
+ // Result was clobbered. Restore it.
+ ldr(result, MemOperand(ldr_location));
+ }
+ // Get the address of the constant.
+ and_(result, result, Operand(kLdrOffsetMask));
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(kPCRegOffset));
+}
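
kLdrOffsetMask is (1 << 12) - 1 = 0xfff because an ARM ldr immediate offset occupies the low 12 bits, and kPCRegOffset is 2 * kPointerSize = 8 because pc reads as the current instruction address plus 8. The recovery as a sketch, assuming the positive-offset form that the debug check accepts:

    #include <cstdint>

    // For "ldr rd, [pc, #off]": constant address = ldr address + 8 + off.
    inline uint32_t ConstantPoolEntryAddress(uint32_t ldr_address,
                                             uint32_t ldr_instruction) {
      const uint32_t kLdrOffsetMask = (1u << 12) - 1;
      const uint32_t kPCRegOffset = 8;
      return ldr_address + (ldr_instruction & kLdrOffsetMask) + kPCRegOffset;
    }
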
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
@@ -2208,15 +2410,21 @@
}
-void CodePatcher::Emit(Instr x) {
- masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
-#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ instr = (instr & ~kCondMask) | cond;
+ masm_.emit(instr);
+}
} } // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 324fbb2..36e4a1f 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -45,6 +45,12 @@
}
+static inline Operand SmiUntagOperand(Register object) {
+ return Operand(object, ASR, kSmiTagSize);
+}
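
On 32-bit ARM a smi stores value v as v << 1 with the tag bit clear, which is why untagging is a plain arithmetic shift right by kSmiTagSize (1). In C++ terms:

    #include <cstdint>

    inline int32_t SmiTagValue(int32_t v)   { return v << 1; }  // add(reg, reg, reg)
    inline int32_t SmiUntagValue(int32_t s) { return s >> 1; }  // ASR kSmiTagSize
    // SmiTagValue(5) == 10; SmiUntagValue(10) == 5; negative values survive
    // because the shift is arithmetic (sign-preserving).
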
+
+
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
@@ -139,7 +145,7 @@
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
- Condition cc, // eq for new space, ne otherwise
+ Condition cond, // eq for new space, ne otherwise
Label* branch);
@@ -234,8 +240,12 @@
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
-
+ void StoreToSafepointRegisterSlot(Register reg);
+ void StoreToSafepointRegistersAndDoublesSlot(Register reg);
+ void LoadFromSafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
+ static MemOperand SafepointRegisterSlot(Register reg);
+ static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
@@ -283,10 +293,8 @@
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
- // Expects the number of arguments in register r0 and
- // the builtin function to call in register r1. Exits with argc in
- // r4, argv in r6, and and the builtin function to call in r5.
- void EnterExitFrame(bool save_doubles);
+ // stack_space - extra stack space, used for alignment before the call to C.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(bool save_doubles);
@@ -544,16 +552,6 @@
}
- inline void BranchOnSmi(Register value, Label* smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(eq, smi_label);
- }
-
- inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(ne, not_smi_label);
- }
-
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
@@ -566,6 +564,7 @@
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Uses VFP instructions to Convert a Smi to a double.
void IntegerToDoubleConversionWithVFP3(Register inReg,
@@ -595,11 +594,13 @@
// Convert the HeapNumber pointed to by source to a 32-bit signed integer
// dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
- // to not_int32 label.
+ // to the not_int32 label. If VFP3 is available, double_scratch is used but
+ // not scratch2.
void ConvertToInt32(Register source,
Register dest,
Register scratch,
Register scratch2,
+ DwVfpRegister double_scratch,
Label *not_int32);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
@@ -620,6 +621,12 @@
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC; instead, return a
+ // retry-after-GC failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+ Condition cond = al);
+
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -638,6 +645,12 @@
int num_arguments,
int result_size);
+ // Tail call of a runtime routine (jump). Try to generate the code if
+ // necessary. Do not perform a GC; instead, return a retry-after-GC
+ // failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size);
+
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -661,9 +674,18 @@
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
+ // Calls an API function. Allocates a HandleScope, extracts the returned
+ // value from the handle, and propagates exceptions. Restores context.
+ // stack_space - space to be unwound on exit (includes the JS call argument
+ // space and the additional space allocated for the fast call).
+ MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+ int stack_space);
+
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
+ MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -694,14 +716,14 @@
// ---------------------------------------------------------------------------
// Debugging
- // Calls Abort(msg) if the condition cc is not satisfied.
+ // Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cond, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cond, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
@@ -713,11 +735,25 @@
bool allow_stub_calls() { return allow_stub_calls_; }
// ---------------------------------------------------------------------------
+ // Number utilities
+
+ // Check whether the value of reg is a power of two and not zero. If not,
+ // control continues at the label not_power_of_two_or_zero. If reg is a
+ // power of two, the register scratch contains the value of (reg - 1) when
+ // control falls through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero);
+
+ // ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s);
}
+ void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
+ add(dst, src, Operand(src), s);
+ }
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
@@ -732,7 +768,20 @@
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
+ void SmiUntag(Register dst, Register src) {
+ mov(dst, Operand(src, ASR, kSmiTagSize));
+ }
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(eq, smi_label);
+ }
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(ne, not_smi_label);
+ }
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
@@ -740,6 +789,20 @@
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
+ void AbortIfNotSmi(Register object);
+
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
// ---------------------------------------------------------------------------
// String utilities
@@ -776,6 +839,15 @@
Label* failure);
+ // ---------------------------------------------------------------------------
+ // Patching helpers.
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
@@ -821,11 +893,15 @@
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
- void Emit(Instr x);
+ void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
+ // Emit the condition part of an instruction leaving the rest of the current
+ // instruction unchanged.
+ void EmitCondition(Condition cond);
+
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 0065057..8104747 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -40,14 +40,8 @@
#if defined(USE_SIMULATOR)
// Only build the simulator if not compiling for real ARM hardware.
-namespace assembler {
-namespace arm {
-
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -62,14 +56,13 @@
explicit Debugger(Simulator* sim);
~Debugger();
- void Stop(Instr* instr);
+ void Stop(Instruction* instr);
void Debug();
private:
- static const instr_t kBreakpointInstr =
- ((AL << 28) | (7 << 25) | (1 << 24) | break_point);
- static const instr_t kNopInstr =
- ((AL << 28) | (13 << 21));
+ static const Instr kBreakpointInstr =
+ (al | (7*B25) | (1*B24) | kBreakpoint);
+ static const Instr kNopInstr = (al | (13*B21));
Simulator* sim_;
@@ -80,8 +73,8 @@
bool GetVFPDoubleValue(const char* desc, double* value);
// Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instr* breakpc);
- bool DeleteBreakpoint(Instr* breakpc);
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
// Undo and redo all breakpoints. This is needed to bracket disassembly and
// execution to skip past breakpoints when run from the debugger.
@@ -112,12 +105,12 @@
}
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
// Get the stop code.
- uint32_t code = instr->SvcField() & kStopCodeMask;
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+ reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
ASSERT(msg != NULL);
@@ -133,9 +126,9 @@
}
// Overwrite the instruction and address with nops.
instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
+ reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
- sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
#else // ndef GENERATED_CODE_COVERAGE
@@ -144,17 +137,23 @@
}
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
// Get the stop code.
- uint32_t code = instr->SvcField() & kStopCodeMask;
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ + Instruction::kInstrSize);
// Update this stop description.
if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
sim_->watched_stops[code].desc = msg;
}
- PrintF("Simulator hit %s\n", msg);
- sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ PrintF("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif
@@ -212,7 +211,7 @@
}
-bool Debugger::SetBreakpoint(Instr* breakpc) {
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -227,7 +226,7 @@
}
-bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -299,10 +298,10 @@
"%" XSTR(ARG_SIZE) "s",
cmd, arg1, arg2);
if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
@@ -397,20 +396,20 @@
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instr::kInstrSize);
+ end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instr::kInstrSize);
+ end = cur + (value * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instr::kInstrSize);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
@@ -428,7 +427,7 @@
if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
PrintF("setting breakpoint failed\n");
}
} else {
@@ -450,13 +449,13 @@
PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
+ PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
- intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize;
- Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
- Instr* msg_address =
- reinterpret_cast<Instr*>(stop_pc + Instr::kInstrSize);
+ intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
@@ -641,7 +640,7 @@
}
-void Simulator::CheckICache(Instr* instr) {
+void Simulator::CheckICache(Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -654,7 +653,7 @@
// Check that the data in memory matches the contents of the I-cache.
CHECK(memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
- Instr::kInstrSize) == 0);
+ Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -745,14 +744,14 @@
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, bool fp_return)
+ Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
- swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
- fp_return_(fp_return),
+ swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
+ type_(type),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
- Instr::kInstrSize);
+ Instruction::kInstrSize);
list_ = this;
}
@@ -761,17 +760,18 @@
}
void* external_function() { return external_function_; }
- bool fp_return() { return fp_return_; }
+ ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function, bool fp_return) {
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
Redirection* current;
for (current = list_; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, fp_return);
+ return new Redirection(external_function, type);
}
- static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
@@ -781,7 +781,7 @@
private:
void* external_function_;
uint32_t swi_instruction_;
- bool fp_return_;
+ ExternalReference::Type type_;
Redirection* next_;
static Redirection* list_;
};
@@ -791,8 +791,8 @@
void* Simulator::RedirectExternalReference(void* external_function,
- bool fp_return) {
- Redirection* redirection = Redirection::Get(external_function, fp_return);
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -830,7 +830,7 @@
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
// End stupid code.
- return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
@@ -996,7 +996,7 @@
// targets that don't support unaligned loads and stores.
-int Simulator::ReadW(int32_t addr, Instr* instr) {
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
@@ -1012,7 +1012,7 @@
}
-void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
@@ -1029,7 +1029,7 @@
}
-uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
@@ -1045,7 +1045,7 @@
}
-int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
@@ -1061,7 +1061,7 @@
}
-void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@@ -1078,7 +1078,7 @@
}
-void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
@@ -1163,7 +1163,7 @@
// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instr* instr, const char* format) {
+void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
@@ -1172,23 +1172,23 @@
// Checks if the current instruction should be executed based on its
// condition bits.
-bool Simulator::ConditionallyExecute(Instr* instr) {
+bool Simulator::ConditionallyExecute(Instruction* instr) {
switch (instr->ConditionField()) {
- case EQ: return z_flag_;
- case NE: return !z_flag_;
- case CS: return c_flag_;
- case CC: return !c_flag_;
- case MI: return n_flag_;
- case PL: return !n_flag_;
- case VS: return v_flag_;
- case VC: return !v_flag_;
- case HI: return c_flag_ && !z_flag_;
- case LS: return !c_flag_ || z_flag_;
- case GE: return n_flag_ == v_flag_;
- case LT: return n_flag_ != v_flag_;
- case GT: return !z_flag_ && (n_flag_ == v_flag_);
- case LE: return z_flag_ || (n_flag_ != v_flag_);
- case AL: return true;
+ case eq: return z_flag_;
+ case ne: return !z_flag_;
+ case cs: return c_flag_;
+ case cc: return !c_flag_;
+ case mi: return n_flag_;
+ case pl: return !n_flag_;
+ case vs: return v_flag_;
+ case vc: return !v_flag_;
+ case hi: return c_flag_ && !z_flag_;
+ case ls: return !c_flag_ || z_flag_;
+ case ge: return n_flag_ == v_flag_;
+ case lt: return n_flag_ != v_flag_;
+ case gt: return !z_flag_ && (n_flag_ == v_flag_);
+ case le: return z_flag_ || (n_flag_ != v_flag_);
+ case al: return true;
default: UNREACHABLE();
}
return false;
@@ -1290,10 +1290,10 @@
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
-int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
- Shift shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountField();
- int32_t result = get_register(instr->RmField());
+int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
+ ShiftOp shift = instr->ShiftField();
+ int shift_amount = instr->ShiftAmountValue();
+ int32_t result = get_register(instr->RmValue());
if (instr->Bit(4) == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
@@ -1357,7 +1357,7 @@
}
} else {
// by register
- int rs = instr->RsField();
+ int rs = instr->RsValue();
shift_amount = get_register(rs) & 0xff;
switch (shift) {
case ASR: {
@@ -1434,9 +1434,9 @@
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with immediate.
-int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
- int rotate = instr->RotateField() * 2;
- int immed8 = instr->Immed8Field();
+int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
*carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
return imm;
@@ -1456,36 +1456,32 @@
// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instr* instr, bool load) {
- int rn = instr->RnField();
+void Simulator::HandleRList(Instruction* instr, bool load) {
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int rlist = instr->RlistField();
+ int rlist = instr->RlistValue();
int num_regs = count_bits(rlist);
intptr_t start_address = 0;
intptr_t end_address = 0;
switch (instr->PUField()) {
- case 0: {
- // Print("da");
+ case da_x: {
UNIMPLEMENTED();
break;
}
- case 1: {
- // Print("ia");
+ case ia_x: {
start_address = rn_val;
end_address = rn_val + (num_regs * 4) - 4;
rn_val = rn_val + (num_regs * 4);
break;
}
- case 2: {
- // Print("db");
+ case db_x: {
start_address = rn_val - (num_regs * 4);
end_address = rn_val - 4;
rn_val = start_address;
break;
}
- case 3: {
- // Print("ib");
+ case ib_x: {
start_address = rn_val + 4;
end_address = rn_val + (num_regs * 4);
rn_val = end_address;
@@ -1533,13 +1529,16 @@
int32_t arg2,
int32_t arg3);
+// This signature supports direct calls into an API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
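For reference, a minimal standalone sketch (not part of the patch) of the dispatch this typedef enables: the redirection now records an ExternalReference::Type next to the function pointer, and SoftwareInterrupt casts the pointer back to the matching C signature. The enum, struct, and function names below are simplified stand-ins for the V8 declarations:

    #include <cstdint>
    #include <cstdio>

    enum class CallType { BUILTIN_CALL, FP_RETURN_CALL, DIRECT_CALL };

    typedef int64_t (*RuntimeCall)(int32_t, int32_t, int32_t, int32_t);
    typedef double (*FPCall)(int32_t, int32_t, int32_t, int32_t);
    typedef void* (*ApiCall)(int32_t);  // stand-in for InvocationCallback

    struct Redirection {
      void* external_function;
      CallType type;
    };

    void Dispatch(const Redirection& r, int32_t a0, int32_t a1,
                  int32_t a2, int32_t a3) {
      if (r.type == CallType::FP_RETURN_CALL) {
        // Result comes back as a double (the simulator calls SetFpResult).
        double d = reinterpret_cast<FPCall>(r.external_function)(a0, a1, a2, a3);
        std::printf("fp result %f\n", d);
      } else if (r.type == CallType::DIRECT_CALL) {
        // Direct API call: one argument, handle-sized result placed in r0.
        void* h = reinterpret_cast<ApiCall>(r.external_function)(a0);
        std::printf("api result %p\n", h);
      } else {
        // Ordinary builtin call returning two words in r0/r1.
        int64_t v =
            reinterpret_cast<RuntimeCall>(r.external_function)(a0, a1, a2, a3);
        std::printf("builtin result %lld\n", static_cast<long long>(v));
      }
    }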
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instr* instr) {
- int svc = instr->SvcField();
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ int svc = instr->SvcValue();
switch (svc) {
- case call_rt_redirected: {
+ case kCallRtRedirected: {
// Check if the stack is aligned. Misalignment is reported below, together
// with information on the function called.
bool stack_aligned =
@@ -1555,9 +1554,9 @@
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
- if (redirection->fp_return()) {
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1573,9 +1572,28 @@
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
+ } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
+ SimulatorRuntimeApiCall target =
+ reinterpret_cast<SimulatorRuntimeApiCall>(external);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF(
+ "Call to host function at %p args %08x",
+ FUNCTION_ADDR(target),
+ arg0);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ v8::Handle<v8::Value> result = target(arg0);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+ }
+ set_register(r0, (int32_t) *result);
} else {
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
+ // builtin call.
+ ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1606,7 +1624,7 @@
set_pc(get_register(lr));
break;
}
- case break_point: {
+ case kBreakpoint: {
Debugger dbg(this);
dbg.Debug();
break;
@@ -1624,7 +1642,7 @@
Debugger dbg(this);
dbg.Stop(instr);
} else {
- set_pc(get_pc() + 2 * Instr::kInstrSize);
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
}
} else {
// This is not a valid svc code.
@@ -1637,8 +1655,8 @@
// Stop helper functions.
-bool Simulator::isStopInstruction(Instr* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcField() >= stop);
+bool Simulator::isStopInstruction(Instruction* instr) {
+ return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
}
@@ -1712,17 +1730,17 @@
// Instruction types 0 and 1 are both rolled into one function because they
// only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instr* instr) {
- int type = instr->TypeField();
+void Simulator::DecodeType01(Instruction* instr) {
+ int type = instr->TypeValue();
if ((type == 0) && instr->IsSpecialType0()) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
if (instr->Bit(24) == 0) {
// Raw field decoding here. Multiply instructions have their Rd in
// funny places.
- int rn = instr->RnField();
- int rm = instr->RmField();
- int rs = instr->RsField();
+ int rn = instr->RnValue();
+ int rm = instr->RmValue();
+ int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t rm_val = get_register(rm);
if (instr->Bit(23) == 0) {
@@ -1756,7 +1774,7 @@
// at a very detailed level.)
// Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
int rd_hi = rn; // Remap the rn field to the RdHi register.
- int rd_lo = instr->RdField();
+ int rd_lo = instr->RdValue();
int32_t hi_res = 0;
int32_t lo_res = 0;
if (instr->Bit(22) == 1) {
@@ -1784,15 +1802,15 @@
}
} else {
// extra load/store instructions
- int rd = instr->RdField();
- int rn = instr->RnField();
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
int32_t addr = 0;
if (instr->Bit(22) == 0) {
- int rm = instr->RmField();
+ int rm = instr->RmValue();
int32_t rm_val = get_register(rm);
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1800,7 +1818,7 @@
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1808,7 +1826,7 @@
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
rn_val -= rm_val;
addr = rn_val;
@@ -1817,7 +1835,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
rn_val += rm_val;
addr = rn_val;
@@ -1833,9 +1851,9 @@
}
}
} else {
- int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+ int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1843,7 +1861,7 @@
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1851,7 +1869,7 @@
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
rn_val -= imm_val;
addr = rn_val;
@@ -1860,7 +1878,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
rn_val += imm_val;
addr = rn_val;
@@ -1917,15 +1935,15 @@
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
- int rm = instr->RmField();
- switch (instr->Bits(7, 4)) {
+ int rm = instr->RmValue();
+ switch (instr->BitField(7, 4)) {
case BX:
set_pc(get_register(rm));
break;
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
- set_register(lr, old_pc + Instr::kInstrSize);
+ set_register(lr, old_pc + Instruction::kInstrSize);
break;
}
case BKPT: {
@@ -1938,9 +1956,9 @@
UNIMPLEMENTED();
}
} else if (instr->Bits(22, 21) == 3) {
- int rm = instr->RmField();
- int rd = instr->RdField();
- switch (instr->Bits(7, 4)) {
+ int rm = instr->RmValue();
+ int rd = instr->RdValue();
+ switch (instr->BitField(7, 4)) {
case CLZ: {
uint32_t bits = get_register(rm);
int leading_zeros = 0;
@@ -1963,15 +1981,15 @@
UNIMPLEMENTED();
}
} else {
- int rd = instr->RdField();
- int rn = instr->RnField();
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
int32_t shifter_operand = 0;
bool shifter_carry_out = 0;
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
- ASSERT(instr->TypeField() == 1);
+ ASSERT(instr->TypeValue() == 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@@ -2067,7 +2085,7 @@
SetCFlag(shifter_carry_out);
} else {
// Format(instr, "movw'cond 'rd, 'imm").
- alu_out = instr->ImmedMovwMovtField();
+ alu_out = instr->ImmedMovwMovtValue();
set_register(rd, alu_out);
}
break;
@@ -2099,7 +2117,7 @@
} else {
// Format(instr, "movt'cond 'rd, 'imm").
alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtField() << 16);
+ (instr->ImmedMovwMovtValue() << 16);
set_register(rd, alu_out);
}
break;
@@ -2178,14 +2196,14 @@
}
-void Simulator::DecodeType2(Instr* instr) {
- int rd = instr->RdField();
- int rn = instr->RnField();
+void Simulator::DecodeType2(Instruction* instr) {
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int32_t im_val = instr->Offset12Field();
+ int32_t im_val = instr->Offset12Value();
int32_t addr = 0;
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -2193,7 +2211,7 @@
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -2201,7 +2219,7 @@
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
rn_val -= im_val;
addr = rn_val;
@@ -2210,7 +2228,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
rn_val += im_val;
addr = rn_val;
@@ -2242,21 +2260,21 @@
}
-void Simulator::DecodeType3(Instr* instr) {
- int rd = instr->RdField();
- int rn = instr->RnField();
+void Simulator::DecodeType3(Instruction* instr) {
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
bool shifter_carry_out = 0;
int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
int32_t addr = 0;
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
@@ -2265,7 +2283,7 @@
int32_t sat_val = (1 << sat_pos) - 1;
int32_t shift = instr->Bits(11, 7);
int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmField());
+ int32_t rm_val = get_register(instr->RmValue());
if (shift_type == 0) { // LSL
rm_val <<= shift;
} else { // ASR
@@ -2290,7 +2308,7 @@
}
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
addr = rn_val - shifter_operand;
if (instr->HasW()) {
@@ -2298,7 +2316,7 @@
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -2307,16 +2325,16 @@
if (instr->Bit(22)) {
// ubfx - unsigned bitfield extract.
uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
+ static_cast<uint32_t>(get_register(instr->RmValue()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ set_register(instr->RdValue(), extr_val);
} else {
// sbfx - signed bitfield extract.
- int32_t rm_val = get_register(instr->RmField());
+ int32_t rm_val = get_register(instr->RmValue());
int32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ set_register(instr->RdValue(), extr_val);
}
} else {
UNREACHABLE();
@@ -2328,18 +2346,18 @@
if (msbit >= lsbit) {
// bfc or bfi - bitfield clear/insert.
uint32_t rd_val =
- static_cast<uint32_t>(get_register(instr->RdField()));
+ static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = (1 << bitcount) - 1;
rd_val &= ~(mask << lsbit);
- if (instr->RmField() != 15) {
+ if (instr->RmValue() != 15) {
// bfi - bitfield insert.
uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
+ static_cast<uint32_t>(get_register(instr->RmValue()));
rm_val &= mask;
rd_val |= rm_val << lsbit;
}
- set_register(instr->RdField(), rd_val);
+ set_register(instr->RdValue(), rd_val);
} else {
UNREACHABLE();
}
@@ -2376,7 +2394,7 @@
}
-void Simulator::DecodeType4(Instr* instr) {
+void Simulator::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -2388,24 +2406,24 @@
}
-void Simulator::DecodeType5(Instr* instr) {
+void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Field() << 2);
+ int off = (instr->SImmed24Value() << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
- set_register(lr, pc_address + Instr::kInstrSize);
+ set_register(lr, pc_address + Instruction::kInstrSize);
}
int pc_reg = get_register(pc);
set_pc(pc_reg + off);
}
-void Simulator::DecodeType6(Instr* instr) {
+void Simulator::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-void Simulator::DecodeType7(Instr* instr) {
+void Simulator::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
SoftwareInterrupt(instr);
} else {
@@ -2414,7 +2432,7 @@
}
-// void Simulator::DecodeTypeVFP(Instr* instr)
+// void Simulator::DecodeTypeVFP(Instruction* instr)
// The Following ARMv7 VFPv instructions are currently supported.
// vmov :Sn = Rt
// vmov :Rt = Sn
@@ -2427,47 +2445,52 @@
// vcmp(Dd, Dm)
// vmrs
// Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instr* instr) {
- ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Simulator::DecodeTypeVFP(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
// Obtain double precision register codes.
- int vm = instr->VFPMRegCode(kDoublePrecision);
- int vd = instr->VFPDRegCode(kDoublePrecision);
- int vn = instr->VFPNRegCode(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
if (instr->Bit(4) == 0) {
- if (instr->Opc1Field() == 0x7) {
+ if (instr->Opc1Value() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
- if (instr->SzField() == 0x1) {
- int m = instr->VFPMRegCode(kDoublePrecision);
- int d = instr->VFPDRegCode(kDoublePrecision);
+ if (instr->SzValue() == 0x1) {
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
set_d_register_from_double(d, get_double_from_d_register(m));
} else {
- int m = instr->VFPMRegCode(kSinglePrecision);
- int d = instr->VFPDRegCode(kSinglePrecision);
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
set_s_register_from_float(d, get_float_from_s_register(m));
}
- } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
+ // vabs
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = fabs(dm_value);
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() >> 1) == 0x6) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
- } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
set_d_register_from_double(vd, dd_value);
- } else if (instr->Opc3Field() == 0x0) {
+ } else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
- if (instr->SzField() == 0x1) {
+ if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
UNREACHABLE(); // Not used by v8.
@@ -2475,12 +2498,12 @@
} else {
UNREACHABLE(); // Not used by V8.
}
- } else if (instr->Opc1Field() == 0x3) {
- if (instr->SzField() != 0x1) {
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
- if (instr->Opc3Field() & 0x1) {
+ if (instr->Opc3Value() & 0x1) {
// vsub
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
@@ -2493,9 +2516,9 @@
double dd_value = dn_value + dm_value;
set_d_register_from_double(vd, dd_value);
}
- } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
- if (instr->SzField() != 0x1) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
@@ -2503,9 +2526,9 @@
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
- if (instr->SzField() != 0x1) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
@@ -2517,15 +2540,15 @@
UNIMPLEMENTED(); // Not used by V8.
}
} else {
- if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0)) {
+ if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLField() == 0x1) &&
- (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmrs
- uint32_t rt = instr->RtField();
+ uint32_t rt = instr->RtValue();
if (rt == 0xF) {
Copy_FPSCR_to_APSR();
} else {
@@ -2539,15 +2562,15 @@
(overflow_vfp_flag_ << 2) |
(div_zero_vfp_flag_ << 1) |
(inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_ << 22);
+ (FPSCR_rounding_mode_);
set_register(rt, fpscr);
}
- } else if ((instr->VLField() == 0x0) &&
- (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmsr
- uint32_t rt = instr->RtField();
+ uint32_t rt = instr->RtValue();
if (rt == pc) {
UNREACHABLE();
} else {
@@ -2562,7 +2585,7 @@
div_zero_vfp_flag_ = (rt_value >> 1) & 1;
inv_op_vfp_flag_ = (rt_value >> 0) & 1;
FPSCR_rounding_mode_ =
- static_cast<FPSCRRoundingModes>((rt_value >> 22) & 3);
+ static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2571,13 +2594,14 @@
}
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0));
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ Instruction* instr) {
+ ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
- int t = instr->RtField();
- int n = instr->VFPNRegCode(kSinglePrecision);
- bool to_arm_register = (instr->VLField() == 0x1);
+ int t = instr->RtValue();
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ bool to_arm_register = (instr->VLValue() == 0x1);
if (to_arm_register) {
int32_t int_value = get_sinteger_from_s_register(n);
@@ -2589,27 +2613,27 @@
}
-void Simulator::DecodeVCMP(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1));
+void Simulator::DecodeVCMP(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
// Comparison.
VFPRegPrecision precision = kSinglePrecision;
- if (instr->SzField() == 1) {
+ if (instr->SzValue() == 1) {
precision = kDoublePrecision;
}
- int d = instr->VFPDRegCode(precision);
+ int d = instr->VFPDRegValue(precision);
int m = 0;
- if (instr->Opc2Field() == 0x4) {
- m = instr->VFPMRegCode(precision);
+ if (instr->Opc2Value() == 0x4) {
+ m = instr->VFPMRegValue(precision);
}
if (precision == kDoublePrecision) {
double dd_value = get_double_from_d_register(d);
double dm_value = 0.0;
- if (instr->Opc2Field() == 0x4) {
+ if (instr->Opc2Value() == 0x4) {
dm_value = get_double_from_d_register(m);
}
@@ -2627,19 +2651,19 @@
}
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
VFPRegPrecision dst_precision = kDoublePrecision;
VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzField() == 1) {
+ if (instr->SzValue() == 1) {
dst_precision = kSinglePrecision;
src_precision = kDoublePrecision;
}
- int dst = instr->VFPDRegCode(dst_precision);
- int src = instr->VFPMRegCode(src_precision);
+ int dst = instr->VFPDRegValue(dst_precision);
+ int src = instr->VFPMRegValue(src_precision);
if (dst_precision == kSinglePrecision) {
double val = get_double_from_d_register(src);
@@ -2650,92 +2674,140 @@
}
}
+bool get_inv_op_vfp_flag(VFPRoundingMode mode,
+ double val,
+ bool unsigned_) {
+ ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+ double max_uint = static_cast<double>(0xffffffffu);
+ double max_int = static_cast<double>(kMaxInt);
+ double min_int = static_cast<double>(kMinInt);
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
- (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+ // Check for NaN.
+ if (val != val) {
+ return true;
+ }
+
+ // Check for overflow. This code works because 32-bit integers can be
+ // exactly represented by IEEE 754 64-bit floating-point values.
+ switch (mode) {
+ case RN:
+ return unsigned_ ? (val >= (max_uint + 0.5)) ||
+ (val < -0.5)
+ : (val >= (max_int + 0.5)) ||
+ (val < (min_int - 0.5));
+
+ case RM:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val < 0)
+ : (val >= (max_int + 1.0)) ||
+ (val < min_int);
+
+ case RZ:
+ return unsigned_ ? (val >= (max_uint + 1.0)) ||
+ (val <= -1)
+ : (val >= (max_int + 1.0)) ||
+ (val <= (min_int - 1.0));
+ default:
+ UNREACHABLE();
+ return true;
+ }
+}
+
+
+// We call this function only if we had a vfp invalid exception.
+// It returns the correct saturated value.
+int VFPConversionSaturate(double val, bool unsigned_res) {
+ if (val != val) {
+ return 0;
+ } else {
+ if (unsigned_res) {
+ return (val < 0) ? 0 : 0xffffffffu;
+ } else {
+ return (val < 0) ? kMinInt : kMaxInt;
+ }
+ }
+}
+
+
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+ (instr->Bits(27, 23) == 0x1D));
+ ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzField() == 1) {
- src_precision = kDoublePrecision;
- }
+ VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
+ : kSinglePrecision;
if (to_integer) {
+ // We are playing with code close to the C++ standard's limits below,
+ // hence the very simple code and heavy checks.
+ //
+ // Note:
+ // C++ defines default type casting from floating point to integer as
+ // (close to) rounding toward zero ("fractional part discarded").
+
+ int dst = instr->VFPDRegValue(kSinglePrecision);
+ int src = instr->VFPMRegValue(src_precision);
+
+ // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
+ // mode or the default Round to Zero mode.
+ VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
+ : RZ;
+ ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
+
bool unsigned_integer = (instr->Bit(16) == 0);
- FPSCRRoundingModes mode;
- if (instr->Bit(7) != 1) {
- // Use FPSCR defined rounding mode.
- mode = FPSCR_rounding_mode_;
- // Only RZ and RM modes are supported.
- ASSERT((mode == RM) || (mode == RZ));
+ bool double_precision = (src_precision == kDoublePrecision);
+
+ double val = double_precision ? get_double_from_d_register(src)
+ : get_float_from_s_register(src);
+
+ int temp = unsigned_integer ? static_cast<uint32_t>(val)
+ : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+
+ if (inv_op_vfp_flag_) {
+ temp = VFPConversionSaturate(val, unsigned_integer);
} else {
- // VFP uses round towards zero by default.
- mode = RZ;
+ switch (mode) {
+ case RN: {
+ double abs_diff =
+ unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+ : fabs(val - temp);
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ temp += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
+ temp = ((temp % 2) == 0) ? temp : temp + val_sign;
+ }
+ break;
+ }
+
+ case RM:
+ temp = temp > val ? temp - 1 : temp;
+ break;
+
+ case RZ:
+ // Nothing to do.
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
- int dst = instr->VFPDRegCode(kSinglePrecision);
- int src = instr->VFPMRegCode(src_precision);
- int32_t kMaxInt = v8::internal::kMaxInt;
- int32_t kMinInt = v8::internal::kMinInt;
- switch (mode) {
- case RM:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
-
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
- sint = sint > val ? sint - 1 : sint;
-
- set_s_register_from_sinteger(dst, sint);
- }
- break;
- case RZ:
- if (src_precision == kDoublePrecision) {
- double val = get_double_from_d_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
-
- set_s_register_from_sinteger(dst, sint);
- } else {
- float val = get_float_from_s_register(src);
-
- inv_op_vfp_flag_ = (val > kMaxInt) || (val < kMinInt) || (val != val);
-
- int sint = unsigned_integer ? static_cast<uint32_t>(val) :
- static_cast<int32_t>(val);
-
- set_s_register_from_sinteger(dst, sint);
- }
- break;
-
- default:
- UNREACHABLE();
- }
+ // Update the destination register.
+ set_s_register_from_sinteger(dst, temp);
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
- int dst = instr->VFPDRegCode(src_precision);
- int src = instr->VFPMRegCode(kSinglePrecision);
+ int dst = instr->VFPDRegValue(src_precision);
+ int src = instr->VFPMRegValue(kSinglePrecision);
int val = get_sinteger_from_s_register(src);
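For reference, a standalone sketch (not V8 code) of the to-integer rule the rewritten path above implements: truncate first — the C++ cast gives round-toward-zero — then correct by at most one step for round-to-nearest, breaking exact ties to even, with NaN and out-of-range inputs saturated as in VFPConversionSaturate:

    #include <climits>
    #include <cmath>
    #include <cstdint>

    int32_t ConvertRoundToNearestEven(double val) {
      if (val != val) return 0;                     // NaN converts to 0
      if (val >= INT_MAX + 0.5) return INT_MAX;     // out of range: saturate
      if (val < INT_MIN - 0.5) return INT_MIN;
      int32_t temp = static_cast<int32_t>(val);     // C++ cast truncates (RZ)
      double abs_diff = std::fabs(val - temp);
      int val_sign = (val > 0) ? 1 : -1;
      if (abs_diff > 0.5) {
        temp += val_sign;                           // nearer integer wins
      } else if (abs_diff == 0.5) {
        // Exactly halfway: round to the even neighbour.
        temp = ((temp % 2) == 0) ? temp : temp + val_sign;
      }
      return temp;
    }

For example, 2.5 converts to 2 while 3.5 converts to 4, matching ARM's RN mode.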
@@ -2758,24 +2830,24 @@
}
-// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
-void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
- ASSERT((instr->TypeField() == 6));
+void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 6));
- if (instr->CoprocessorField() == 0xA) {
- switch (instr->OpcodeField()) {
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
case 0x8:
case 0xA:
case 0xC:
case 0xE: { // Load and store single precision float to memory.
- int rn = instr->RnField();
- int vd = instr->VFPDRegCode(kSinglePrecision);
- int offset = instr->Immed8Field();
+ int rn = instr->RnValue();
+ int vd = instr->VFPDRegValue(kSinglePrecision);
+ int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
}
@@ -2794,16 +2866,16 @@
UNIMPLEMENTED(); // Not used by V8.
break;
}
- } else if (instr->CoprocessorField() == 0xB) {
- switch (instr->OpcodeField()) {
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
+ int rt = instr->RtValue();
+ int rn = instr->RnValue();
+ int vm = instr->VmValue();
if (instr->HasL()) {
int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
@@ -2821,9 +2893,9 @@
break;
case 0x8:
case 0xC: { // Load and store double to memory.
- int rn = instr->RnField();
- int vd = instr->VdField();
- int offset = instr->Immed8Field();
+ int rn = instr->RnValue();
+ int vd = instr->VdValue();
+ int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
}
@@ -2850,7 +2922,7 @@
// Executes the current instruction.
-void Simulator::InstructionDecode(Instr* instr) {
+void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
CheckICache(instr);
}
@@ -2864,10 +2936,10 @@
reinterpret_cast<byte*>(instr));
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
- if (instr->ConditionField() == special_condition) {
+ if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
} else if (ConditionallyExecute(instr)) {
- switch (instr->TypeField()) {
+ switch (instr->TypeValue()) {
case 0:
case 1: {
DecodeType01(instr);
@@ -2902,9 +2974,14 @@
break;
}
}
+ // If the instruction is a non-taken conditional stop, we need to skip the
+ // inlined message address.
+ } else if (instr->IsStop()) {
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
}
if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ set_register(pc, reinterpret_cast<int32_t>(instr)
+ + Instruction::kInstrSize);
}
}
@@ -2918,7 +2995,7 @@
// Fast version of the dispatch loop without checking whether the simulator
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
- Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
InstructionDecode(instr);
program_counter = get_pc();
@@ -2927,7 +3004,7 @@
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
- Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
Debugger dbg(this);
@@ -3048,7 +3125,7 @@
return address;
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // USE_SIMULATOR
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 7bfe76a..5256ae3 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -79,9 +79,10 @@
#include "constants-arm.h"
#include "hashmap.h"
+#include "assembler.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
class CachePage {
public:
@@ -203,11 +204,11 @@
};
// Unsupported instructions use Format to print an error and stop execution.
- void Format(Instr* instr, const char* format);
+ void Format(Instruction* instr, const char* format);
// Checks if the current instruction should be executed based on its
// condition bits.
- bool ConditionallyExecute(Instr* instr);
+ bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@@ -225,13 +226,13 @@
void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes
- int32_t GetShiftRm(Instr* instr, bool* carry_out);
- int32_t GetImm(Instr* instr, bool* carry_out);
- void HandleRList(Instr* instr, bool load);
- void SoftwareInterrupt(Instr* instr);
+ int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+ int32_t GetImm(Instruction* instr, bool* carry_out);
+ void HandleRList(Instruction* instr, bool load);
+ void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
- inline bool isStopInstruction(Instr* instr);
+ inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
@@ -245,47 +246,49 @@
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
- inline uint16_t ReadHU(int32_t addr, Instr* instr);
- inline int16_t ReadH(int32_t addr, Instr* instr);
+ inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+ inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
- inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+ inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
- inline int ReadW(int32_t addr, Instr* instr);
- inline void WriteW(int32_t addr, int value, Instr* instr);
+ inline int ReadW(int32_t addr, Instruction* instr);
+ inline void WriteW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
- void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
- void DecodeType2(Instr* instr);
- void DecodeType3(Instr* instr);
- void DecodeType4(Instr* instr);
- void DecodeType5(Instr* instr);
- void DecodeType6(Instr* instr);
- void DecodeType7(Instr* instr);
+ // Both type 0 and type 1 rolled into one.
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
+ void DecodeType7(Instruction* instr);
// Support for VFP.
- void DecodeTypeVFP(Instr* instr);
- void DecodeType6CoprocessorIns(Instr* instr);
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
- void DecodeVCMP(Instr* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
- void InstructionDecode(Instr* instr);
+ void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(Instr* instr);
+ static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
- bool fp_return);
+ static void* RedirectExternalReference(
+ void* external_function,
+ v8::internal::ExternalReference::Type type);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
@@ -311,7 +314,7 @@
bool v_flag_FPSCR_;
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
- FPSCRRoundingModes FPSCR_rounding_mode_;
+ VFPRoundingMode FPSCR_rounding_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
@@ -330,8 +333,8 @@
static v8::internal::HashMap* i_cache_;
// Registered breakpoints.
- Instr* break_pc_;
- instr_t break_instr_;
+ Instruction* break_pc_;
+ Instr break_instr_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
@@ -344,27 +347,22 @@
// instruction, if bit 31 of watched_stops[code].count is unset.
// The value watched_stops[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
- struct StopCoundAndDesc {
+ struct StopCountAndDesc {
uint32_t count;
char* desc;
};
- StopCoundAndDesc watched_stops[kNumOfWatchedStops];
+ StopCountAndDesc watched_stops[kNumOfWatchedStops];
};
-} } // namespace assembler::arm
-
-
-namespace v8 {
-namespace internal {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
+ reinterpret_cast<Object*>(Simulator::current()->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- assembler::arm::Simulator::current()->Call( \
+ Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -380,16 +378,16 @@
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return assembler::arm::Simulator::current()->StackLimit();
+ return Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+ Simulator* sim = Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- assembler::arm::Simulator::current()->PopAddress();
+ Simulator::current()->PopAddress();
}
};
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index b7ec5d2..9ef6115 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -370,27 +370,31 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss) {
+ Label* miss,
+ bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+ support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
+ }
}
@@ -521,7 +525,7 @@
// -----------------------------------
// Check that the function really is a function.
- __ BranchOnSmi(r1, miss);
+ __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@@ -571,72 +575,94 @@
__ CallStub(&stub);
}
+static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
Register scratch) {
__ mov(scratch, Operand(Smi::FromInt(0)));
- __ push(scratch);
- __ push(scratch);
- __ push(scratch);
- __ push(scratch);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ push(scratch);
+ }
}
// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(4);
+ __ Drop(kFastApiCallArguments);
}
-// Generates call to FastHandleApiCall builtin.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : holder (set by CheckPrototypes)
+ // -- sp[4] : callee js function
+ // -- sp[8] : call data
+ // -- sp[12] : last js argument
+ // -- ...
+ // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 4) * 4] : receiver
+ // -----------------------------------
// Get the function and setup the context.
JSFunction* function = optimization.constant_function();
__ mov(r5, Operand(Handle<JSFunction>(function)));
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
- bool info_loaded = false;
- Object* callback = optimization.api_call_info()->callback();
- if (Heap::InNewSpace(callback)) {
- info_loaded = true;
- __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
- __ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
- } else {
- __ Move(r7, Handle<Object>(callback));
- }
Object* call_data = optimization.api_call_info()->data();
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
if (Heap::InNewSpace(call_data)) {
- if (!info_loaded) {
- __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
- }
+ __ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
__ Move(r6, Handle<Object>(call_data));
}
+ // Store js function and call data.
+ __ stm(ib, sp, r5.bit() | r6.bit());
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
- __ sub(sp, sp, Operand(1 * kPointerSize));
+ // r2 points to call data as expected by Arguments
+ // (refer to layout above).
+ __ add(r2, sp, Operand(2 * kPointerSize));
- // Set the number of arguments.
- __ mov(r0, Operand(argc + 4));
+ Object* callback = optimization.api_call_info()->callback();
+ Address api_function_address = v8::ToCData<Address>(callback);
+ ApiFunction fun(api_function_address);
- // Jump to the fast api call builtin (tail call).
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::FastHandleApiCall));
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ const int kApiStackSpace = 4;
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // r0 = v8::Arguments&
+ // Arguments is after the return address.
+ __ add(r0, sp, Operand(1 * kPointerSize));
+ // v8::Arguments::implicit_args = data
+ __ str(r2, MemOperand(r0, 0 * kPointerSize));
+ // v8::Arguments::values = last argument
+ __ add(ip, r2, Operand(argc * kPointerSize));
+ __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ // v8::Arguments::length_ = argc
+ __ mov(ip, Operand(argc));
+ __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ // v8::Arguments::is_construct_call = 0
+ __ mov(ip, Operand(0));
+ __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ MaybeObject* result = masm->TryCallApiFunctionAndReturn(
+ &fun, argc + kFastApiCallArguments + 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+ return Heap::undefined_value();
}
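For reference, the four words stored above mirror the in-memory layout of v8::Arguments that the native callback reads. A struct-shaped sketch — the field names follow the comments in the generated code and should be treated as assumptions rather than the verbatim v8.h declaration:

    #include <cstdint>

    struct ArgumentsBlock {       // hypothetical mirror of v8::Arguments
      intptr_t* implicit_args;    // -> holder, callee function, call data
      intptr_t* values;           // -> last JS argument on the stack
      int length;                 // argc
      int is_construct_call;      // always 0 on this fast-call path
    };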
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -646,36 +672,36 @@
arguments_(arguments),
name_(name) {}
- void Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
+ __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
if (optimization.is_constant_call()) {
- CompileCacheable(masm,
- object,
- receiver,
- scratch1,
- scratch2,
- scratch3,
- holder,
- lookup,
- name,
- optimization,
- miss);
+ return CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
} else {
CompileRegular(masm,
object,
@@ -686,21 +712,22 @@
name,
holder,
miss);
+ return Heap::undefined_value();
}
}
private:
- void CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -764,7 +791,10 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ MaybeObject* result = GenerateFastApiDirectCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -782,6 +812,8 @@
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);
}
+
+ return Heap::undefined_value();
}
void CompileRegular(MacroAssembler* masm,
@@ -902,6 +934,111 @@
}
+// Convert and store int passed in register ival to IEEE 754 single precision
+// floating point value at memory location (dst + 4 * wordoffset).
+// If VFP3 is available, use it for conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, ival);
+ __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
+ __ vcvt_f32_s32(s0, s0);
+ __ vstr(s0, scratch1, 0);
+ } else {
+ Label not_special, done;
+ // Move sign bit from source to destination. This works because the sign
+ // bit of the binary32 value has the same position and polarity as the
+ // 2's complement sign bit of a 32-bit integer.
+ ASSERT(kBinary32SignMask == 0x80000000u);
+
+ __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
+ // Negate value if it is negative.
+ __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ // We have -1, 0 or 1, which we treat specially. Register ival contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ cmp(ival, Operand(1));
+ __ b(gt, ¬_special);
+
+ // For 1 or -1 we need to or in the 0 exponent (biased).
+ static const uint32_t exponent_word_for_1 =
+ kBinary32ExponentBias << kBinary32ExponentShift;
+
+ __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
+ __ b(&done);
+
+ __ bind(¬_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ Register zeros = scratch2;
+ __ CountLeadingZeros(zeros, ival, scratch1);
+
+ // Compute exponent and or it into the exponent register.
+ __ rsb(scratch1,
+ zeros,
+ Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
+
+ __ orr(fval,
+ fval,
+ Operand(scratch1, LSL, kBinary32ExponentShift));
+
+ // Shift up the source chopping the top bit off.
+ __ add(zeros, zeros, Operand(1));
+ // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+ __ mov(ival, Operand(ival, LSL, zeros));
+ // And the top (top 20 bits).
+ __ orr(fval,
+ fval,
+ Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
+
+ __ bind(&done);
+ __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
+ }
+}
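For reference, a standalone sketch (not V8 code) of what the non-VFP fallback above computes: the IEEE 754 binary32 bit pattern of a 32-bit integer, assembled from sign, biased exponent, and a truncated mantissa. The special cases for 0, 1, and -1 are folded into the general path here:

    #include <cstdint>

    uint32_t IntToBinary32Bits(int32_t ival) {
      uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
      uint32_t mag = (ival < 0)
          ? static_cast<uint32_t>(-static_cast<int64_t>(ival))
          : static_cast<uint32_t>(ival);
      if (mag == 0) return sign;                   // +/-0
      int zeros = 0;                               // CLZ in the generated code
      for (uint32_t v = mag; (v & 0x80000000u) == 0; v <<= 1) zeros++;
      uint32_t exponent = (31 - zeros) + 127;      // biased exponent
      // Shift off the implicit leading 1; mag == 1 has no fraction bits left.
      uint32_t frac = (zeros == 31) ? 0u : (mag << (zeros + 1));
      return sign | (exponent << 23) | (frac >> 9);  // keep top 23 bits
    }

Like the generated code, it truncates mantissa bits beyond the 24 that binary32 can hold rather than rounding them.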
+
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+
+ __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+ __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+ } else {
+ __ mov(loword, Operand(0, RelocInfo::NONE));
+ __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix it here.
+ if (!(biased_exponent & 1)) {
+ __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+ }
+}
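And a matching sketch of GenerateUInt2Double in plain C++: given a nonzero integer and its known count of leading zeroes, produce the hi:lo words of the IEEE 754 double encoding. The final masking step exists because OR-ing in the mantissa (whose implicit leading 1 lands on bit 20 of the high word) can corrupt the lowest exponent bit. Again an illustration under the same assumptions, not V8 code:

    #include <cstdint>

    void UIntToDoubleWords(uint32_t hiword, int leading_zeroes,
                           uint32_t* hi_out, uint32_t* lo_out) {
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const uint32_t biased_exponent = 1023u + meaningful_bits;
      const int hi_shift = meaningful_bits - 20;  // 20 mantissa bits up top
      const uint32_t scratch = biased_exponent << 20;
      if (hi_shift > 0) {
        *lo_out = hiword << (32 - hi_shift);        // low mantissa bits
        *hi_out = scratch | (hiword >> hi_shift);   // exponent + high bits
      } else {
        *lo_out = 0;
        *hi_out = scratch | (hiword << -hi_shift);
      }
      // The implicit leading 1 just OR'd in overlaps the exponent's low bit;
      // clear it when that bit of the biased exponent should be 0.
      if (!(biased_exponent & 1u)) *hi_out &= ~(1u << 20);
    }

For hiword = 1 with leading_zeroes = 31 this yields 0x3FF00000:0x00000000, the encoding of 1.0.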
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -1089,17 +1226,16 @@
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -1120,7 +1256,7 @@
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(load_callback_property, 5, 1);
- return true;
+ return Heap::undefined_value(); // Success.
}
@@ -1138,7 +1274,7 @@
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
+ __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1406,7 +1542,7 @@
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), receiver,
@@ -1460,7 +1596,7 @@
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
- __ BranchOnNotSmi(r4, &with_write_barrier);
+ __ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1);
__ Ret();
@@ -1567,7 +1703,7 @@
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object),
@@ -1904,7 +2040,7 @@
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ BranchOnSmi(r1, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -1947,11 +2083,11 @@
// - Make sure Flush-to-zero mode control bit is unset (bit 22).
__ bic(r9, r3,
Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+ __ orr(r9, r9, Operand(kRoundToMinusInf));
__ vmsr(r9);
// Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
// Use vcvt latency to start checking for special cases.
// Get the argument exponent and clear the sign bit.
@@ -2063,7 +2199,7 @@
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ BranchOnNotSmi(r0, &not_smi);
+ __ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
@@ -2260,7 +2396,8 @@
}
if (depth != kInvalidProtoDepth) {
- GenerateFastApiCall(masm(), optimization, argc);
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2304,16 +2441,19 @@
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), r2);
- compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- r1,
- r3,
- r4,
- r0,
- &miss);
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ r1,
+ r3,
+ r4,
+ r0,
+ &miss);
+ if (result->IsFailure()) {
+ return result;
+ }
// Move returned value, the function to call, to r1.
__ mov(r1, r0);
@@ -2541,9 +2681,18 @@
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(r5, r6);
+ __ b(eq, &miss);
+
// Store the value in the cell.
- __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret();
@@ -2633,12 +2782,11 @@
// -----------------------------------
Label miss;
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
+ callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2785,12 +2933,11 @@
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+ r4, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2890,7 +3037,7 @@
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
+ GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
@@ -2972,6 +3119,38 @@
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ r1,
+ r0,
+ r2,
+ r3,
+ r4,
+ r5,
+ r0,
+ &miss,
+ &miss,
+ &miss);
+
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -3224,6 +3403,603 @@
}
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalShortArray:
+ case kExternalIntArray:
+ return true;
+
+ case kExternalUnsignedByteArray:
+ case kExternalUnsignedShortArray:
+ case kExternalUnsignedIntArray:
+ return false;
+
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
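The load stub below tags 32-bit results as smis only when they fit in 31 bits: the signed check (cmp value, #0xC0000000; b mi) boxes values outside [-2^30, 2^30), and the unsigned check (tst value, #0xC0000000) boxes values with either of the top two bits set. A self-contained restatement of those range checks:

```cc
#include <cstdint>
#include <cstdio>

// A smi stores a 31-bit signed integer in a tagged word, so a 32-bit
// signed value fits iff it lies in [-2^30, 2^30). The stub tests this
// as "bit 31 of value + 0x40000000", which is what cmp/b.mi computes.
bool SignedFitsSmi(int32_t v) {
  return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
}

// Unsigned values must leave both top bits clear (tst value, 0xC0000000).
bool UnsignedFitsSmi(uint32_t v) { return (v & 0xC0000000u) == 0; }

int main() {
  std::printf("%d %d\n",
              SignedFitsSmi(0x3FFFFFFF),   // 1: 2^30 - 1 fits
              SignedFitsSmi(0x40000000));  // 0: 2^30 must be boxed
}
```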
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ Register key = r0;
+ Register receiver = r1;
+
+ // Check that the object isn't a smi
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check that the object is a JS object. Load map into r2.
+ __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+ __ cmp(r2, ip);
+ __ b(ne, &slow);
+
+ // Check that the index is in range.
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+ __ cmp(ip, Operand(key, ASR, kSmiTagSize));
+ // Unsigned comparison catches both negative and too-large values.
+ __ b(lo, &slow);
+
+ // r3: elements array
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+ // r3: base pointer of external storage
+
+ // We are not untagging the smi key; instead we work with it
+ // as if it were premultiplied by 2.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+ Register value = r2;
+ switch (array_type) {
+ case kExternalByteArray:
+ __ ldrsb(value, MemOperand(r3, key, LSR, 1));
+ break;
+ case kExternalUnsignedByteArray:
+ __ ldrb(value, MemOperand(r3, key, LSR, 1));
+ break;
+ case kExternalShortArray:
+ __ ldrsh(value, MemOperand(r3, key, LSL, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ ldrh(value, MemOperand(r3, key, LSL, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ ldr(value, MemOperand(r3, key, LSL, 1));
+ break;
+ case kExternalFloatArray:
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ add(r2, r3, Operand(key, LSL, 1));
+ __ vldr(s0, r2, 0);
+ } else {
+ __ ldr(value, MemOperand(r3, key, LSL, 1));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // r2: value
+ // For the floating-point array type:
+ // s0: value (if VFP3 is supported)
+ // r2: value (if VFP3 is not supported)
+
+ if (array_type == kExternalIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ __ cmp(value, Operand(0xC0000000));
+ __ b(mi, &box_int);
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ __ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, value);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ WriteInt32ToHeapNumberStub stub(value, r0, r3);
+ __ TailCallStub(&stub);
+ }
+ } else if (array_type == kExternalUnsignedIntArray) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ Label box_int, done;
+ __ tst(value, Operand(0xC0000000));
+ __ b(ne, &box_int);
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ __ bind(&box_int);
+ __ vmov(s0, value);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
+ // registers - also when jumping due to exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+
+ __ vcvt_f64_u32(d0, s0);
+ __ sub(r1, r2, Operand(kHeapObjectTag));
+ __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+ __ mov(r0, r2);
+ __ Ret();
+ } else {
+ // Check whether unsigned integer fits into smi.
+ Label box_int_0, box_int_1, done;
+ __ tst(value, Operand(0x80000000));
+ __ b(ne, &box_int_0);
+ __ tst(value, Operand(0x40000000));
+ __ b(ne, &box_int_1);
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ Register hiword = value; // r2.
+ Register loword = r3;
+
+ __ bind(&box_int_0);
+ // Integer does not have leading zeros.
+ GenerateUInt2Double(masm(), hiword, loword, r4, 0);
+ __ b(&done);
+
+ __ bind(&box_int_1);
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm(), hiword, loword, r4, 1);
+
+ __ bind(&done);
+ // Integer was converted to double in registers hiword:loword.
+ // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
+ // clobbers all registers - also when jumping due to exhausted young
+ // space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+
+ __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+ __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+
+ __ mov(r0, r4);
+ __ Ret();
+ }
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ vcvt_f64_f32(d0, s0);
+ __ sub(r1, r2, Operand(kHeapObjectTag));
+ __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+ __ mov(r0, r2);
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ // VFP is not available; do the single-to-double conversion manually.
+
+ // r2: floating point value (binary32)
+ // r3: heap number for result
+
+ // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
+ // the slow case from here.
+ __ and_(r0, value, Operand(kBinary32MantissaMask));
+
+ // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
+ // the slow case from here.
+ __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
+ __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ teq(r1, Operand(0x00));
+ __ b(eq, &exponent_rebiased);
+
+ __ teq(r1, Operand(0xff));
+ __ mov(r1, Operand(0x7ff), LeaveCC, eq);
+ __ b(eq, &exponent_rebiased);
+
+ // Rebias exponent.
+ __ add(r1,
+ r1,
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ and_(r2, value, Operand(kBinary32SignMask));
+ value = no_reg;
+ __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
+ __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
+
+ __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+ __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+
+ __ mov(r0, r3);
+ __ Ret();
+ }
+
+ } else {
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+ }
+
+ // Slow case, key and receiver still in r0 and r1.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ return GetCode(flags);
+}
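In the non-VFP float path above, the stub widens a binary32 bit pattern to a binary64 heap number by hand: split off the sign, rebias the 8-bit exponent from 127 to 1023 (mapping an all-ones exponent to 0x7FF and leaving a zero exponent alone, so denormals are not converted exactly), then place the top 20 of the 23 mantissa bits in the high word and the remaining 3 at the top of the low word. A self-contained restatement of that arithmetic:

```cc
#include <cstdint>
#include <cstdio>

// Mirrors the manual binary32 -> binary64 conversion above. As in the
// stub, a zero biased exponent is kept as zero, so denormal inputs are
// only approximated.
void Float32BitsToFloat64Words(uint32_t f, uint32_t* hi, uint32_t* lo) {
  uint32_t mantissa = f & 0x007FFFFFu;      // 23 mantissa bits
  uint32_t exponent = (f >> 23) & 0xFFu;    // 8 exponent bits
  uint32_t sign = f & 0x80000000u;
  if (exponent == 0xFFu) {
    exponent = 0x7FFu;                      // Inf/NaN keep all-ones
  } else if (exponent != 0) {
    exponent += 1023 - 127;                 // rebias 127 -> 1023
  }
  // kMantissaBitsInTopWord == 20: 3 mantissa bits spill to the low word.
  *hi = sign | (exponent << 20) | (mantissa >> 3);
  *lo = mantissa << 29;
}

int main() {
  uint32_t hi, lo;
  Float32BitsToFloat64Words(0x3FC00000u, &hi, &lo);  // 1.5f
  std::printf("%08x %08x\n", hi, lo);                // 3ff80000 00000000
}
```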
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Register usage.
+ Register value = r0;
+ Register key = r1;
+ Register receiver = r2;
+ // r3 mostly holds the elements array or the destination external array.
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the object is a JS object. Load map into r3.
+ __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(le, &slow);
+
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check that the elements array is the appropriate type of ExternalArray.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+ __ cmp(r4, ip);
+ __ b(ne, &slow);
+
+ // Check that the index is in range.
+ __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+ __ cmp(r4, ip);
+ // Unsigned comparison catches both negative and too-large values.
+ __ b(hs, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // r3: external array.
+ // r4: key (integer).
+ __ JumpIfNotSmi(value, &check_heap_number);
+ __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
+ // r5: value (integer).
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ case kExternalFloatArray:
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Entry registers are intact, r0 holds the value which is the return value.
+ __ Ret();
+
+ // r3: external array.
+ // r4: index (integer).
+ __ bind(&check_heap_number);
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ if (array_type == kExternalFloatArray) {
+ // vldr requires the offset to be a multiple of 4, so we cannot
+ // include -kHeapObjectTag in it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
+ __ vcvt_f32_f64(s0, d0);
+ __ vstr(s0, r5, 0);
+ } else {
+ // Need to perform float-to-int conversion.
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires the offset to be a multiple of 4, so
+ // we cannot include -kHeapObjectTag in it.
+ __ sub(r5, value, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and Infinities have all-one exponents so they sign extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(0), LeaveCC, eq);
+
+ // Not infinity or NaN: simply convert to int.
+ if (IsElementTypeSigned(array_type)) {
+ __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
+ } else {
+ __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
+ }
+ __ vmov(r5, s0, ne);
+
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // Entry registers are intact, r0 holds the value which is the return value.
+ __ Ret();
+ } else {
+ // VFP3 is not available; do manual conversions.
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (array_type == kExternalFloatArray) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ // Rebias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ add(r9,
+ r9,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ cmp(r9, Operand(kBinary32MaxExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+ __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
+ __ b(gt, &done);
+
+ __ cmp(r9, Operand(kBinary32MinExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+ __ b(lt, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
+
+ __ bind(&done);
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ // Entry registers are intact, r0 holds the value which is the return
+ // value.
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r9, r9, r7);
+ __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ b(&done);
+ } else {
+ bool is_signed_type = IsElementTypeSigned(array_type);
+ int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ // Unbias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
+ // If exponent is negative then result is 0.
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+ __ b(mi, &done);
+
+ // If exponent is too big then result is minimal value.
+ __ cmp(r9, Operand(meaningfull_bits - 1));
+ __ mov(r5, Operand(min_value), LeaveCC, ge);
+ __ b(ge, &done);
+
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
+ __ b(pl, &sign);
+
+ __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+ __ mov(r5, Operand(r5, LSL, r9));
+ __ rsb(r9, r9, Operand(meaningfull_bits));
+ __ orr(r5, r5, Operand(r6, LSR, r9));
+
+ __ bind(&sign);
+ __ teq(r7, Operand(0, RelocInfo::NONE));
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ __ bind(&done);
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
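The integer paths of the store stub implement the zero-for-NaN policy noted above plus saturation by exponent: a zero or all-ones biased exponent stores 0, a negative unbiased exponent stores 0, and an exponent too wide for the element stores the type's minimal value. A self-contained restatement of that policy (it follows the decision tree of the non-VFP path above, not its exact ARM shift sequence):

```cc
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// Decision tree of the heap-number-to-integer-element conversion above.
int32_t DoubleToClampedInt(double d, bool is_signed) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  const int meaningful_bits = is_signed ? 31 : 32;
  const uint32_t min_value = is_signed ? 0x80000000u : 0u;
  int biased = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased == 0) return 0;                  // zero or denormal
  if (biased == 0x7FF) return 0;              // NaN or +/-Infinity
  int e = biased - 1023;                      // unbiased exponent
  if (e < 0) return 0;                        // |d| < 1 truncates to 0
  if (e >= meaningful_bits - 1) return static_cast<int32_t>(min_value);
  uint64_t significand = (bits & 0x000FFFFFFFFFFFFFull) | (1ull << 52);
  uint32_t magnitude = static_cast<uint32_t>(significand >> (52 - e));
  bool negative = (bits >> 63) != 0;
  return negative ? -static_cast<int32_t>(magnitude)
                  : static_cast<int32_t>(magnitude);
}

int main() {
  std::printf("%d\n", DoubleToClampedInt(3.7, true));    // 3
  std::printf("%d\n", DoubleToClampedInt(-3.7, true));   // -3
  double nan = std::numeric_limits<double>::quiet_NaN();
  std::printf("%d\n", DoubleToClampedInt(nan, true));    // 0
}
```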
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 45f4876..b4b518c 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -68,6 +68,8 @@
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
+ ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
+ expected->tos_known_smi_map_);
ASSERT(expected->IsCompatibleWith(this));
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
@@ -76,7 +78,7 @@
void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
- expected->tos_known_smi_map_ &= tos_known_smi_map_;
+ tos_known_smi_map_ &= expected->tos_known_smi_map_;
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
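The new assert and the flipped intersection encode an invariant of top-of-stack smi tracking: merging to an expected frame may only narrow what the current frame knows, and the expected frame (which other merge sites share) must not gain knowledge. A hedged standalone sketch of just that invariant; the FrameSketch type is illustrative and not part of this patch:

```cc
#include <cassert>
#include <cstdint>

// Each bit of tos_known_smi_map_ marks a stack slot statically known
// to hold a smi. Illustrative model of the two MergeTo overloads above.
struct FrameSketch {
  uint32_t tos_known_smi_map_;
  // Const target: everything the expected frame claims must already
  // be known here (subset check), matching the added ASSERT.
  void MergeTo(const FrameSketch* expected) const {
    assert((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
           expected->tos_known_smi_map_);
  }
  // Mutable path: narrow our own knowledge instead of mutating the
  // shared expected frame, matching the flipped &= above.
  void MergeTo(FrameSketch* expected) {
    tos_known_smi_map_ &= expected->tos_known_smi_map_;
  }
};
```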
@@ -327,18 +329,25 @@
}
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+void VirtualFrame::CallStoreIC(Handle<String> name,
+ bool is_contextual,
+ StrictModeFlag strict_mode) {
+ Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
PopToR0();
+ RelocInfo::Mode mode;
if (is_contextual) {
SpillAll();
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
EmitPop(r1);
SpillAll();
+ mode = RelocInfo::CODE_TARGET;
}
__ mov(r2, Operand(name));
- CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+ CallCodeObject(ic, mode, 0);
}
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 82b4d08..b6e794a 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -294,7 +294,8 @@
// Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
- void CallStoreIC(Handle<String> name, bool is_contextual);
+ void CallStoreIC(Handle<String> name, bool is_contextual,
+ StrictModeFlag strict_mode);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.
diff --git a/src/array.js b/src/array.js
index 0d7a7cb..1298434 100644
--- a/src/array.js
+++ b/src/array.js
@@ -171,8 +171,9 @@
}
return %StringBuilderConcat(elements, length2, '');
} finally {
- // Make sure to pop the visited array no matter what happens.
- if (is_array) visited_arrays.pop();
+ // Make sure to remove the last element of the visited array no
+ // matter what happens.
+ if (is_array) visited_arrays.length = visited_arrays.length - 1;
}
}
@@ -603,16 +604,17 @@
}
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given differently from when an undefined delete count is given.
+ // given as a request to delete all the elements from the start,
+ // which differs from the case of an undefined delete count.
// This does not follow ECMA-262, but we do the same for
// compatibility.
var del_count = 0;
- if (num_arguments > 1) {
+ if (num_arguments == 1) {
+ del_count = len - start_i;
+ } else {
del_count = TO_INTEGER(delete_count);
if (del_count < 0) del_count = 0;
if (del_count > len - start_i) del_count = len - start_i;
- } else {
- del_count = len - start_i;
}
var deleted_elements = [];
@@ -1016,9 +1018,11 @@
} else {
index = TO_INTEGER(index);
// If index is negative, index from the end of the array.
- if (index < 0) index = length + index;
- // If index is still negative, search the entire array.
- if (index < 0) index = 0;
+ if (index < 0) {
+ index = length + index;
+ // If index is still negative, search the entire array.
+ if (index < 0) index = 0;
+ }
}
var min = index;
var max = length;
diff --git a/src/assembler.cc b/src/assembler.cc
index fb9a4af..ef2094f 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -553,8 +553,9 @@
: address_(Redirect(Builtins::c_function_address(id))) {}
-ExternalReference::ExternalReference(ApiFunction* fun)
- : address_(Redirect(fun->address())) {}
+ExternalReference::ExternalReference(
+ ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL)
+ : address_(Redirect(fun->address(), type)) {}
ExternalReference::ExternalReference(Builtins::Name name)
@@ -838,8 +839,8 @@
return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
}
if (!isinf(x)) {
- if (y == 0.5) return sqrt(x);
- if (y == -0.5) return 1.0 / sqrt(x);
+ if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
+ if (y == -0.5) return 1.0 / sqrt(x + 0.0);
}
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return OS::nan_value();
@@ -888,17 +889,18 @@
UNREACHABLE();
}
// Passing true as 2nd parameter indicates that they return an fp value.
- return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
+ return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
}
ExternalReference ExternalReference::compare_doubles() {
return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
- false));
+ BUILTIN_CALL));
}
-ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+ExternalReference::ExternalReferenceRedirector*
+ ExternalReference::redirector_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/assembler.h b/src/assembler.h
index 4ef61e4..b089b09 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -181,11 +181,10 @@
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- CODE_TARGET_CONTEXT, // Code target used for contextual loads.
+ CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
-
GLOBAL_PROPERTY_CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@@ -203,7 +202,7 @@
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET,
- LAST_GCED_ENUM = EMBEDDED_OBJECT
+ LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
};
@@ -460,9 +459,6 @@
#endif
-typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
-
-
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
// an ExternalReference instance. This is done in order to track the origin of
@@ -470,9 +466,29 @@
// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED {
public:
+ // Used in the simulator to support different native API calls.
+ //
+ // BUILTIN_CALL - builtin call.
+ // MaybeObject* f(v8::internal::Arguments).
+ //
+ // FP_RETURN_CALL - builtin call that returns floating point.
+ // double f(double, double).
+ //
+ // DIRECT_CALL - direct call to API function native callback
+ // from generated code.
+ // Handle<Value> f(v8::Arguments&)
+ //
+ enum Type {
+ BUILTIN_CALL, // default
+ FP_RETURN_CALL,
+ DIRECT_CALL
+ };
+
+ typedef void* ExternalReferenceRedirector(void* original, Type type);
+
explicit ExternalReference(Builtins::CFunctionId id);
- explicit ExternalReference(ApiFunction* ptr);
+ explicit ExternalReference(ApiFunction* ptr, Type type);
explicit ExternalReference(Builtins::Name name);
@@ -600,17 +616,19 @@
static ExternalReferenceRedirector* redirector_;
- static void* Redirect(void* address, bool fp_return = false) {
+ static void* Redirect(void* address,
+ Type type = ExternalReference::BUILTIN_CALL) {
if (redirector_ == NULL) return address;
- void* answer = (*redirector_)(address, fp_return);
+ void* answer = (*redirector_)(address, type);
return answer;
}
- static void* Redirect(Address address_arg, bool fp_return = false) {
+ static void* Redirect(Address address_arg,
+ Type type = ExternalReference::BUILTIN_CALL) {
void* address = reinterpret_cast<void*>(address_arg);
void* answer = (redirector_ == NULL) ?
address :
- (*redirector_)(address, fp_return);
+ (*redirector_)(address, type);
return answer;
}
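Replacing the bool fp_return flag with the three-valued Type lets a simulator build distinguish calling conventions when it reroutes native calls through trampolines. A hedged sketch of a redirector matching the new typedef; RedirectForSimulator and MakeTrampoline are illustrative names, not part of this patch:

```cc
// Hypothetical trampoline factory: builds a stub that marshals the
// named calling convention when running on the simulator.
void* MakeTrampoline(void* original, ExternalReference::Type type);

void* RedirectForSimulator(void* original, ExternalReference::Type type) {
  switch (type) {
    case ExternalReference::BUILTIN_CALL:    // MaybeObject* f(Arguments)
    case ExternalReference::FP_RETURN_CALL:  // double f(double, double)
    case ExternalReference::DIRECT_CALL:     // Handle<Value> f(v8::Arguments&)
      return MakeTrampoline(original, type);
  }
  return original;  // unreachable with a well-formed Type
}
```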
diff --git a/src/ast.cc b/src/ast.cc
index 4fe89be..772684c 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -32,7 +32,6 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
-#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -216,17 +215,28 @@
return (*h1)->Equals(*h2);
}
-bool IsEqualSmi(void* first, void* second) {
- ASSERT((*reinterpret_cast<Smi**>(first))->IsSmi());
- ASSERT((*reinterpret_cast<Smi**>(second))->IsSmi());
- Handle<Smi> h1(reinterpret_cast<Smi**>(first));
- Handle<Smi> h2(reinterpret_cast<Smi**>(second));
- return (*h1)->value() == (*h2)->value();
+
+bool IsEqualNumber(void* first, void* second) {
+ ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
+ ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
+
+ Handle<Object> h1(reinterpret_cast<Object**>(first));
+ Handle<Object> h2(reinterpret_cast<Object**>(second));
+ if (h1->IsSmi()) {
+ return h2->IsSmi() && *h1 == *h2;
+ }
+ if (h2->IsSmi()) return false;
+ Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
+ Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
+ ASSERT(isfinite(n1->value()));
+ ASSERT(isfinite(n2->value()));
+ return n1->value() == n2->value();
}
+
void ObjectLiteral::CalculateEmitStore() {
HashMap properties(&IsEqualString);
- HashMap elements(&IsEqualSmi);
+ HashMap elements(&IsEqualNumber);
for (int i = this->properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = this->properties()->at(i);
Literal* literal = property->key();
@@ -239,16 +249,19 @@
uint32_t hash;
HashMap* table;
void* key;
- uint32_t index;
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
- ASSERT(!name->AsArrayIndex(&index));
- key = name.location();
- hash = name->Hash();
- table = &properties;
- } else if (handle->ToArrayIndex(&index)) {
+ if (name->AsArrayIndex(&hash)) {
+ Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ key = key_handle.location();
+ table = &elements;
+ } else {
+ key = name.location();
+ hash = name->Hash();
+ table = &properties;
+ }
+ } else if (handle->ToArrayIndex(&hash)) {
key = handle.location();
- hash = index;
table = &elements;
} else {
ASSERT(handle->IsNumber());
@@ -515,6 +528,8 @@
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
is_array_length_ = true;
+ } else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
+ is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
Builtins::LoadIC_FunctionPrototype)) {
is_function_prototype_ = true;
@@ -560,25 +575,25 @@
}
-static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
SharedFunctionInfo* info = target->shared();
- if (target->NeedsArgumentsAdaption()) {
- // If the number of formal parameters of the target function
- // does not match the number of arguments we're passing, we
- // don't want to deal with it.
- return info->formal_parameter_count() == arity;
- } else {
- // If the target doesn't need arguments adaption, we can call
- // it directly, but we avoid to do so if it has a custom call
- // generator, because that is likely to generate better code.
- return !info->HasBuiltinFunctionId() ||
- !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
- }
+ // If the number of formal parameters of the target function does
+ // not match the number of arguments we're passing, we don't want to
+ // deal with it. Otherwise, we can call it directly.
+ return !target->NeedsArgumentsAdaption() ||
+ info->formal_parameter_count() == arity;
}
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- holder_ = Handle<JSObject>::null();
+ if (check_type_ == RECEIVER_MAP_CHECK) {
+ // For primitive checks the holder is set up to point to the
+ // corresponding prototype object, i.e. one step of the algorithm
+ // below has already been performed.
+ // For non-primitive checks we clear it to allow computing targets
+ // for polymorphic calls.
+ holder_ = Handle<JSObject>::null();
+ }
while (true) {
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
@@ -589,7 +604,7 @@
type = Handle<Map>(holder()->map());
} else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return CallWithoutIC(target_, arguments()->length());
+ return CanCallWithoutIC(target_, arguments()->length());
} else {
return false;
}
@@ -603,14 +618,16 @@
cell_ = Handle<JSGlobalPropertyCell>::null();
LookupResult lookup;
global->Lookup(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == NORMAL) {
+ if (lookup.IsProperty() &&
+ lookup.type() == NORMAL &&
+ lookup.holder() == *global) {
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate)
- && CallWithoutIC(candidate, arguments()->length())) {
+ if (!Heap::InNewSpace(*candidate) &&
+ CanCallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
}
@@ -648,27 +665,20 @@
map = receiver_types_->at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- map = Handle<Map>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
+ holder_ = Handle<JSObject>(
+ oracle->GetPrototypeForPrimitiveCheck(check_type_));
+ map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
}
}
-void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
- TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
- is_smi_only_ = left.IsSmi() && right.IsSmi();
-}
-
-
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
- TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
- if (left.IsSmi() && right.IsSmi()) {
+ TypeInfo info = oracle->CompareType(this);
+ if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
- } else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
+ } else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);
diff --git a/src/ast.h b/src/ast.h
index f55ddcd..2aee5d7 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1205,9 +1205,10 @@
key_(key),
pos_(pos),
type_(type),
- is_monomorphic_(false),
receiver_types_(NULL),
+ is_monomorphic_(false),
is_array_length_(false),
+ is_string_length_(false),
is_function_prototype_(false),
is_arguments_access_(false) { }
@@ -1221,6 +1222,7 @@
int position() const { return pos_; }
bool is_synthetic() const { return type_ == SYNTHETIC; }
+ bool IsStringLength() const { return is_string_length_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Marks that this is actually an argument rewritten to a keyed property
@@ -1249,11 +1251,12 @@
int pos_;
Type type_;
- bool is_monomorphic_;
ZoneMapList* receiver_types_;
- bool is_array_length_;
- bool is_function_prototype_;
- bool is_arguments_access_;
+ bool is_monomorphic_ : 1;
+ bool is_array_length_ : 1;
+ bool is_string_length_ : 1;
+ bool is_function_prototype_ : 1;
+ bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
// Dummy property used during preparsing.
@@ -1395,7 +1398,7 @@
Expression* left,
Expression* right,
int pos)
- : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
+ : op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsBinaryOp(op));
right_id_ = (op == Token::AND || op == Token::OR)
? static_cast<int>(GetNextId())
@@ -1416,10 +1419,6 @@
Expression* right() const { return right_; }
int position() const { return pos_; }
- // Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiOnly() const { return is_smi_only_; }
-
// Bailout support.
int RightId() const { return right_id_; }
@@ -1428,7 +1427,6 @@
Expression* left_;
Expression* right_;
int pos_;
- bool is_smi_only_;
// The short-circuit logical operations have an AST ID for their
// right-hand subexpression.
int right_id_;
@@ -1675,7 +1673,8 @@
int start_position,
int end_position,
bool is_expression,
- bool contains_loops)
+ bool contains_loops,
+ bool strict_mode)
: name_(name),
scope_(scope),
body_(body),
@@ -1689,6 +1688,7 @@
end_position_(end_position),
is_expression_(is_expression),
contains_loops_(contains_loops),
+ strict_mode_(strict_mode),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false),
@@ -1705,6 +1705,7 @@
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
bool contains_loops() const { return contains_loops_; }
+ bool strict_mode() const { return strict_mode_; }
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -1747,6 +1748,7 @@
int end_position_;
bool is_expression_;
bool contains_loops_;
+ bool strict_mode_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_full_codegen_;
diff --git a/src/bignum.cc b/src/bignum.cc
index dd1537a..a973974 100644
--- a/src/bignum.cc
+++ b/src/bignum.cc
@@ -67,7 +67,7 @@
int needed_bigits = kUInt64Size / kBigitSize + 1;
EnsureCapacity(needed_bigits);
for (int i = 0; i < needed_bigits; ++i) {
- bigits_[i] = value & kBigitMask;
+ bigits_[i] = static_cast<Chunk>(value & kBigitMask);
value = value >> kBigitSize;
}
used_digits_ = needed_bigits;
@@ -266,7 +266,7 @@
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = carry & kBigitMask;
+ bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
@@ -287,13 +287,13 @@
uint64_t product_low = low * bigits_[i];
uint64_t product_high = high * bigits_[i];
uint64_t tmp = (carry & kBigitMask) + product_low;
- bigits_[i] = tmp & kBigitMask;
+ bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
- bigits_[used_digits_] = carry & kBigitMask;
+ bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
@@ -748,7 +748,8 @@
for (int i = 0; i < other.used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
DoubleChunk remove = borrow + product;
- Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
+ Chunk difference =
+ bigits_[i + exponent_diff] - static_cast<Chunk>(remove & kBigitMask);
bigits_[i + exponent_diff] = difference & kBigitMask;
borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
(remove >> kBigitSize));
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index cae1a9a..415d2dd 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -349,7 +349,7 @@
prototype,
call_code,
is_ecma_native);
- SetProperty(target, symbol, function, DONT_ENUM);
+ SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM);
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
@@ -580,8 +580,8 @@
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
- SetProperty(prototype, Factory::constructor_symbol(),
- Top::object_function(), NONE);
+ SetLocalPropertyNoThrow(
+ prototype, Factory::constructor_symbol(), Top::object_function(), NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
@@ -683,7 +683,8 @@
global_context()->set_security_token(*inner_global);
Handle<String> object_name = Handle<String>(Heap::Object_symbol());
- SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);
+ SetLocalPropertyNoThrow(inner_global, object_name,
+ Top::object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@@ -851,7 +852,7 @@
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
- SetProperty(global, name, json_object, DONT_ENUM);
+ SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
}
@@ -880,12 +881,12 @@
global_context()->set_arguments_boilerplate(*result);
// Note: callee must be added as the first property and
// length must be added as the second property.
- SetProperty(result, Factory::callee_symbol(),
- Factory::undefined_value(),
- DONT_ENUM);
- SetProperty(result, Factory::length_symbol(),
- Factory::undefined_value(),
- DONT_ENUM);
+ SetLocalPropertyNoThrow(result, Factory::callee_symbol(),
+ Factory::undefined_value(),
+ DONT_ENUM);
+ SetLocalPropertyNoThrow(result, Factory::length_symbol(),
+ Factory::undefined_value(),
+ DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
@@ -1050,7 +1051,6 @@
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
configure_instance_fun);
- INSTALL_NATIVE(JSFunction, "MakeMessage", make_message_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
}
@@ -1086,10 +1086,8 @@
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
- SetProperty(builtins,
- global_symbol,
- Handle<Object>(global_context()->global()),
- attributes);
+ Handle<Object> global_obj(global_context()->global());
+ SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
// Setup the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@@ -1481,17 +1479,17 @@
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
- SetProperty(js_global, natives_string,
- Handle<JSObject>(js_global->builtins()), DONT_ENUM);
+ SetLocalPropertyNoThrow(js_global, natives_string,
+ Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
- SetProperty(Handle<JSObject>::cast(Error),
- name,
- Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
- NONE);
+ SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
+ name,
+ Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
+ NONE);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1508,8 +1506,8 @@
Handle<String> debug_string =
Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
- SetProperty(js_global, debug_string,
- Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
+ Handle<Object> global_proxy(Debug::debug_context()->global_proxy());
+ SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
#endif
}
@@ -1680,7 +1678,7 @@
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
- SetProperty(to, key, value, details.attributes());
+ SetLocalPropertyNoThrow(to, key, value, details.attributes());
break;
}
case CONSTANT_FUNCTION: {
@@ -1688,7 +1686,7 @@
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
- SetProperty(to, key, fun, details.attributes());
+ SetLocalPropertyNoThrow(to, key, fun, details.attributes());
break;
}
case CALLBACKS: {
@@ -1738,7 +1736,7 @@
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
- SetProperty(to, key, value, details.attributes());
+ SetLocalPropertyNoThrow(to, key, value, details.attributes());
}
}
}
@@ -1805,9 +1803,8 @@
AddToWeakGlobalContextList(*global_context_);
Top::set_context(*global_context_);
i::Counters::contexts_created_by_snapshot.Increment();
- result_ = global_context_;
JSFunction* empty_function =
- JSFunction::cast(result_->function_map()->prototype());
+ JSFunction::cast(global_context_->function_map()->prototype());
empty_function_ = Handle<JSFunction>(empty_function);
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
diff --git a/src/builtins.cc b/src/builtins.cc
index c4c9fc1..8fdc1b1 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -368,7 +368,9 @@
array_proto = JSObject::cast(array_proto->GetPrototype());
ASSERT(array_proto->elements() == Heap::empty_fixed_array());
// Object.prototype
- array_proto = JSObject::cast(array_proto->GetPrototype());
+ Object* proto = array_proto->GetPrototype();
+ if (proto == Heap::null_value()) return false;
+ array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
ASSERT(array_proto->GetPrototype()->IsNull());
@@ -734,35 +736,39 @@
int n_arguments = args.length() - 1;
- // Return empty array when no arguments are supplied.
- if (n_arguments == 0) {
- return AllocateEmptyJSArray();
- }
-
int relative_start = 0;
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (!arg1->IsUndefined()) {
- return CallJsBuiltin("ArraySplice", args);
+ if (n_arguments > 0) {
+ Object* arg1 = args[1];
+ if (arg1->IsSmi()) {
+ relative_start = Smi::cast(arg1)->value();
+ } else if (!arg1->IsUndefined()) {
+ return CallJsBuiltin("ArraySplice", args);
+ }
}
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given differently from when an undefined delete count is given.
+ // given as a request to delete all the elements from the start.
+ // And it differs from the case of undefined delete count.
// This does not follow ECMA-262, but we do the same for
// compatibility.
- int delete_count = len;
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- delete_count = Smi::cast(arg2)->value();
- } else {
- return CallJsBuiltin("ArraySplice", args);
+ int actual_delete_count;
+ if (n_arguments == 1) {
+ ASSERT(len - actual_start >= 0);
+ actual_delete_count = len - actual_start;
+ } else {
+ int value = 0; // ToInteger(undefined) == 0
+ if (n_arguments > 1) {
+ Object* arg2 = args[2];
+ if (arg2->IsSmi()) {
+ value = Smi::cast(arg2)->value();
+ } else {
+ return CallJsBuiltin("ArraySplice", args);
+ }
}
+ actual_delete_count = Min(Max(value, 0), len - actual_start);
}
- int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
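The restructured C++ fast path now matches the JavaScript fallback: with exactly one argument the delete count defaults to everything from the start, while an explicit (possibly undefined) second argument is converted with ToInteger and clamped to [0, len - actual_start]. A standalone restatement of the delete-count rule; has_count=false models splice(start), and an explicit undefined arrives as value 0 because ToInteger(undefined) == 0:

```cc
#include <algorithm>
#include <cstdio>

// Delete-count rule implemented by the hunk above.
int ActualDeleteCount(int len, int actual_start, bool has_count, int value) {
  if (!has_count) return len - actual_start;           // splice(start)
  return std::min(std::max(value, 0), len - actual_start);
}

int main() {
  std::printf("%d\n", ActualDeleteCount(5, 1, false, 0));  // 4: splice(1)
  std::printf("%d\n", ActualDeleteCount(5, 1, true, 0));   // 0: splice(1, undefined)
  std::printf("%d\n", ActualDeleteCount(5, 1, true, 99));  // 4: clamped to len - start
}
```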
@@ -1228,7 +1234,12 @@
static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm);
+ LoadIC::GenerateStringLength(masm, false);
+}
+
+
+static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
+ LoadIC::GenerateStringLength(masm, true);
}
@@ -1282,44 +1293,6 @@
}
-static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
-
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
@@ -1334,6 +1307,11 @@
}
+static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateInitialize(masm);
+}
+
+
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1344,8 +1322,18 @@
}
+static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateNormal(masm);
+}
+
+
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm);
+ StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict);
+}
+
+
+static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict);
}
@@ -1354,54 +1342,26 @@
}
+static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateArrayLength(masm);
+}
+
+
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
+static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateGlobalProxy(masm);
+}
+
+
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
-static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
-
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
@@ -1509,13 +1469,13 @@
extra_args \
},
-#define DEF_FUNCTION_PTR_A(name, kind, state) \
- { FUNCTION_ADDR(Generate_##name), \
- NULL, \
- #name, \
- name, \
- Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \
- NO_EXTRA_ARGUMENTS \
+#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \
+ { FUNCTION_ADDR(Generate_##name), \
+ NULL, \
+ #name, \
+ name, \
+ Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \
+ NO_EXTRA_ARGUMENTS \
},
// Define array of pointers to generators and C builtin functions.
diff --git a/src/builtins.h b/src/builtins.h
index d2b4be2..ada23a7 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -63,86 +63,135 @@
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
- V(JSConstructCall, BUILTIN, UNINITIALIZED) \
- V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
- V(LazyCompile, BUILTIN, UNINITIALIZED) \
- V(LazyRecompile, BUILTIN, UNINITIALIZED) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructCall, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LazyCompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LazyRecompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(NotifyOSR, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
- V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
- V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
- V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
- V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \
- V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
- V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
+ V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
\
- V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
- V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
+ V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
\
- V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
- V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
- V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
- V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \
+ V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
+ V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
+ StoreIC::kStoreICStrict) \
+ V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
+ StoreIC::kStoreICStrict) \
+ V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
+ StoreIC::kStoreICStrict) \
+ V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
+ StoreIC::kStoreICStrict) \
+ V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
+ StoreIC::kStoreICStrict) \
\
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC) \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
- V(FunctionCall, BUILTIN, UNINITIALIZED) \
- V(FunctionApply, BUILTIN, UNINITIALIZED) \
+ V(FunctionCall, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(FunctionApply, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
- V(ArrayCode, BUILTIN, UNINITIALIZED) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
- V(StringConstructCode, BUILTIN, UNINITIALIZED) \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
\
- V(OnStackReplacement, BUILTIN, UNINITIALIZED)
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
- V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \
- V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK)
+ V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@@ -199,7 +248,7 @@
enum Name {
#define DEF_ENUM_C(name, ignore) name,
-#define DEF_ENUM_A(name, kind, state) name,
+#define DEF_ENUM_A(name, kind, state, extra) name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 69f8477..ba77b21 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -32,7 +32,6 @@
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
-#include "oprofile-agent.h"
namespace v8 {
namespace internal {
@@ -63,9 +62,6 @@
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
- OPROFILE(CreateNativeCodeRegion(GetName(),
- code->instruction_start(),
- code->instruction_size()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 76f29f0..0d0e37f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -59,6 +59,7 @@
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
+ V(ToNumber) \
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
@@ -74,7 +75,8 @@
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
- V(RegExpCEntry)
+ V(RegExpCEntry) \
+ V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
#endif
@@ -260,6 +262,19 @@
};
+class ToNumberStub: public CodeStub {
+ public:
+ ToNumberStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToNumber; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "ToNumberStub"; }
+};
+
+
class FastNewClosureStub : public CodeStub {
public:
void Generate(MacroAssembler* masm);
@@ -599,8 +614,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int alignment_skew = 0);
+ bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
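A CodeStub such as the ToNumberStub added above is identified by its MajorKey/MinorKey pair, and stubs with equal keys share one cached code object; since ToNumberStub's minor key is always 0, at most one ToNumber code object ever exists. A hedged sketch of such a keying scheme (the bit width and enum value are assumptions, not V8's constants):

    #include <cassert>
    #include <cstdint>

    enum DemoMajor { kToNumberDemo = 7 };  // illustrative value

    // Hypothetical packing: minor key in the high bits, major in the low.
    static uint32_t StubKey(DemoMajor major, int minor) {
      const int kMajorBits = 6;  // assumed width
      assert(major < (1 << kMajorBits));
      return (static_cast<uint32_t>(minor) << kMajorBits) | major;
    }

    int main() {
      // Every ToNumberStub instance maps to the same key and therefore
      // reuses the same generated code.
      assert(StubKey(kToNumberDemo, 0) == 7u);
      return 0;
    }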
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index 6534e7f..5467789 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -55,6 +55,10 @@
Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+StrictModeFlag CodeGenerator::strict_mode_flag() {
+ return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
+}
+
} } // namespace v8::internal
#endif // V8_CODEGEN_INL_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index c7e6f1c..e6fcecd 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -31,7 +31,6 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "oprofile-agent.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
#include "rewriter.h"
@@ -267,7 +266,7 @@
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (!code.is_null()) {
+ if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 38438cb..cccb7a4 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -136,7 +136,8 @@
: CompilationSubCache(generations) { }
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
- Handle<Context> context);
+ Handle<Context> context,
+ StrictModeFlag strict_mode);
void Put(Handle<String> source,
Handle<Context> context,
@@ -371,7 +372,9 @@
Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
- Handle<String> source, Handle<Context> context) {
+ Handle<String> source,
+ Handle<Context> context,
+ StrictModeFlag strict_mode) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
@@ -380,7 +383,7 @@
{ HandleScope scope;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- result = table->LookupEval(*source, *context);
+ result = table->LookupEval(*source, *context, strict_mode);
if (result->IsSharedFunctionInfo()) {
break;
}
@@ -503,18 +506,20 @@
}
-Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
- Handle<Context> context,
- bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(
+ Handle<String> source,
+ Handle<Context> context,
+ bool is_global,
+ StrictModeFlag strict_mode) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
Handle<SharedFunctionInfo> result;
if (is_global) {
- result = eval_global.Lookup(source, context);
+ result = eval_global.Lookup(source, context, strict_mode);
} else {
- result = eval_contextual.Lookup(source, context);
+ result = eval_contextual.Lookup(source, context, strict_mode);
}
return result;
}
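Threading StrictModeFlag through the eval cache keeps strict-mode and sloppy-mode compilations of the same source from aliasing each other. A minimal sketch of the resulting lookup contract, with stand-in types for the context and the cached compilation unit:

    #include <map>
    #include <string>
    #include <tuple>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    // V8 keys on (source, context, strictness); an int id stands in for
    // the context here.
    typedef std::tuple<std::string, int, StrictModeFlag> EvalKey;

    static std::map<EvalKey, int> eval_cache;  // value: compiled-unit id

    const int* LookupEvalSketch(const std::string& source,
                                int context_id,
                                StrictModeFlag strict_mode) {
      std::map<EvalKey, int>::const_iterator it =
          eval_cache.find(std::make_tuple(source, context_id, strict_mode));
      return it == eval_cache.end() ? nullptr : &it->second;
    }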
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 37e21be..f779a23 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -51,7 +51,8 @@
// contain a script for the given source string.
static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
- bool is_global);
+ bool is_global,
+ StrictModeFlag strict_mode);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
diff --git a/src/compiler.cc b/src/compiler.cc
index bbe7f2f..ae7b2b9 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -37,9 +37,8 @@
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
-#include "lithium-allocator.h"
+#include "lithium.h"
#include "liveedit.h"
-#include "oprofile-agent.h"
#include "parser.h"
#include "rewriter.h"
#include "runtime-profiler.h"
@@ -289,6 +288,11 @@
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
+ if (Top::has_pending_exception()) {
+ info->SetCode(Handle<Code>::null());
+ return false;
+ }
+
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> code = graph->Compile();
if (!code.is_null()) {
@@ -419,9 +423,6 @@
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
String::cast(script->name())));
- OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
- info->code()->instruction_start(),
- info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code()));
@@ -432,9 +433,6 @@
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
""));
- OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script",
- info->code()->instruction_start(),
- info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
@@ -548,7 +546,8 @@
Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
- bool is_global) {
+ bool is_global,
+ StrictModeFlag strict_mode) {
int source_length = source->length();
Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@@ -559,7 +558,10 @@
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
Handle<SharedFunctionInfo> result;
- result = CompilationCache::LookupEval(source, context, is_global);
+ result = CompilationCache::LookupEval(source,
+ context,
+ is_global,
+ strict_mode);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
@@ -567,9 +569,14 @@
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
+ if (strict_mode == kStrictMode) info.MarkAsStrict();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
+      // If the caller is in strict mode, the result must be strict as well,
+ // but not the other way around. Consider:
+ // eval("'use strict'; ...");
+ ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
CompilationCache::PutEval(source, context, is_global, result);
}
}
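The ASSERT records a one-way invariant: the eval'd source may opt itself into strict mode, but a strict caller can never receive sloppy-mode code. Expressed as a pure function (illustrative, not V8 code):

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    // Strictness is inherited from the caller or introduced by a
    // 'use strict' directive in the eval'd source -- never removed.
    StrictModeFlag EvalResultMode(StrictModeFlag caller, bool has_directive) {
      return (caller == kStrictMode || has_directive) ? kStrictMode
                                                      : kNonStrictMode;
    }
    // EvalResultMode(kStrictMode, x) == kStrictMode for any x, so the
    // assertion (caller non-strict, or result strict) always holds.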
@@ -599,7 +606,9 @@
// Compile the code.
if (!MakeCode(info)) {
- Top::StackOverflow();
+ if (!Top::has_pending_exception()) {
+ Top::StackOverflow();
+ }
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
@@ -762,6 +771,7 @@
*lit->this_property_assignments());
function_info->set_try_full_codegen(lit->try_full_codegen());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_strict_mode(lit->strict_mode());
}
@@ -773,7 +783,6 @@
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (Logger::is_logging() ||
- OProfileAgent::is_enabled() ||
CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
@@ -785,18 +794,10 @@
*name,
String::cast(script->name()),
line_num));
- OPROFILE(CreateNativeCodeRegion(*name,
- String::cast(script->name()),
- line_num,
- code->instruction_start(),
- code->instruction_size()));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*name));
- OPROFILE(CreateNativeCodeRegion(*name,
- code->instruction_start(),
- code->instruction_size()));
}
}
diff --git a/src/compiler.h b/src/compiler.h
index 44ac9c8..239bea3 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -49,6 +49,7 @@
bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
+ bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -69,6 +70,12 @@
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
+ void MarkAsStrict() {
+ flags_ |= IsStrict::encode(true);
+ }
+ StrictModeFlag StrictMode() {
+ return is_strict() ? kStrictMode : kNonStrictMode;
+ }
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
@@ -145,6 +152,9 @@
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ if (!shared_info_.is_null() && shared_info_->strict_mode()) {
+ MarkAsStrict();
+ }
}
void SetMode(Mode mode) {
@@ -162,6 +172,8 @@
class IsGlobal: public BitField<bool, 2, 1> {};
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
+ // Strict mode - used in eager compilation.
+ class IsStrict: public BitField<bool, 4, 1> {};
unsigned flags_;
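IsStrict occupies bit 4 of flags_ via the BitField template. A self-contained reconstruction of that encode/mask/decode scheme (the real template lives in V8's utility headers; this sketch assumes its shape):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static uint32_t mask() { return ((1u << size) - 1) << shift; }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & mask()) >> shift);
      }
    };

    typedef BitField<bool, 4, 1> IsStrictDemo;  // matches IsStrict above

    int main() {
      uint32_t flags = 0;
      flags |= IsStrictDemo::encode(true);  // MarkAsStrict()
      assert(IsStrictDemo::decode(flags));  // is_strict()
      return 0;
    }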
@@ -230,7 +242,8 @@
// Compile a String source within a context for Eval.
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
Handle<Context> context,
- bool is_global);
+ bool is_global,
+ StrictModeFlag strict_mode);
// Compile from function info (used for lazy compilation). Returns true on
// success and false if the compilation resulted in a stack overflow.
diff --git a/src/conversions.cc b/src/conversions.cc
index a954d6c..a348235 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -125,8 +125,8 @@
}
-static double SignedZero(bool sign) {
- return sign ? -0.0 : 0.0;
+static double SignedZero(bool negative) {
+ return negative ? -0.0 : 0.0;
}
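The sign -> negative rename clarifies what the flag means; the helper itself exists because parsing "-0" must produce IEEE negative zero, which compares equal to 0.0 yet carries a distinct sign bit. A compilable illustration:

    #include <cassert>
    #include <cmath>

    static double SignedZero(bool negative) { return negative ? -0.0 : 0.0; }

    int main() {
      double z = SignedZero(true);
      assert(z == 0.0);         // -0.0 compares equal to 0.0...
      assert(std::signbit(z));  // ...but the sign bit distinguishes it.
      return 0;
    }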
@@ -134,14 +134,14 @@
template <int radix_log_2, class Iterator, class EndMark>
static double InternalStringToIntDouble(Iterator current,
EndMark end,
- bool sign,
+ bool negative,
bool allow_trailing_junk) {
ASSERT(current != end);
// Skip leading 0s.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
int64_t number = 0;
@@ -217,7 +217,7 @@
ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
if (exponent == 0) {
- if (sign) {
+ if (negative) {
if (number == 0) return -0.0;
number = -number;
}
@@ -227,7 +227,7 @@
ASSERT(number != 0);
// The double could be constructed faster from number (mantissa), exponent
// and sign. Assuming it's a rare case, simpler code is used.
- return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+ return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
}
@@ -238,7 +238,7 @@
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
- bool sign = false;
+ bool negative = false;
bool leading_zero = false;
if (*current == '+') {
@@ -248,14 +248,14 @@
} else if (*current == '-') {
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
- sign = true;
+ negative = true;
}
if (radix == 0) {
// Radix detection.
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
if (*current == 'x' || *current == 'X') {
radix = 16;
++current;
@@ -271,7 +271,7 @@
if (*current == '0') {
// Allow "0x" prefix.
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
if (*current == 'x' || *current == 'X') {
++current;
if (current == end) return JUNK_STRING_VALUE;
@@ -287,7 +287,7 @@
while (*current == '0') {
leading_zero = true;
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
if (!leading_zero && !isDigit(*current, radix)) {
@@ -298,21 +298,21 @@
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
- current, end, sign, allow_trailing_junk);
+ current, end, negative, allow_trailing_junk);
default:
UNREACHABLE();
}
@@ -344,7 +344,7 @@
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
- return sign ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+ return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
}
// The following code causes accumulating rounding error for numbers greater
@@ -406,7 +406,7 @@
return JUNK_STRING_VALUE;
}
- return sign ? -v : v;
+ return negative ? -v : v;
}
@@ -445,7 +445,7 @@
bool nonzero_digit_dropped = false;
bool fractional_part = false;
- bool sign = false;
+ bool negative = false;
if (*current == '+') {
// Ignore leading sign.
@@ -454,7 +454,7 @@
} else if (*current == '-') {
++current;
if (current == end) return JUNK_STRING_VALUE;
- sign = true;
+ negative = true;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -468,13 +468,13 @@
}
ASSERT(buffer_pos == 0);
- return sign ? -V8_INFINITY : V8_INFINITY;
+ return negative ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
leading_zero = true;
@@ -487,14 +487,14 @@
return InternalStringToIntDouble<4>(current,
end,
- sign,
+ negative,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
}
}
@@ -539,7 +539,7 @@
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign);
+ if (current == end) return SignedZero(negative);
exponent--; // Move this 0 into the exponent.
}
}
@@ -631,7 +631,7 @@
if (octal) {
return InternalStringToIntDouble<3>(buffer,
buffer + buffer_pos,
- sign,
+ negative,
allow_trailing_junk);
}
@@ -644,7 +644,7 @@
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return sign ? -converted : converted;
+ return negative ? -converted : converted;
}
@@ -702,26 +702,12 @@
const char* DoubleToCString(double v, Vector<char> buffer) {
- StringBuilder builder(buffer.start(), buffer.length());
-
switch (fpclassify(v)) {
- case FP_NAN:
- builder.AddString("NaN");
- break;
-
- case FP_INFINITE:
- if (v < 0.0) {
- builder.AddString("-Infinity");
- } else {
- builder.AddString("Infinity");
- }
- break;
-
- case FP_ZERO:
- builder.AddCharacter('0');
- break;
-
+ case FP_NAN: return "NaN";
+ case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
+ case FP_ZERO: return "0";
default: {
+ StringBuilder builder(buffer.start(), buffer.length());
int decimal_point;
int sign;
const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -764,9 +750,9 @@
if (exponent < 0) exponent = -exponent;
builder.AddFormatted("%d", exponent);
}
+ return builder.Finalize();
}
}
- return builder.Finalize();
}
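The rewrite returns string literals for the special fpclassify cases and defers StringBuilder construction to the general path only. A sketch of the same control flow, with snprintf standing in for the dtoa-based digit generation:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    const char* DoubleToCStringSketch(double v, char* buffer, std::size_t size) {
      switch (std::fpclassify(v)) {
        case FP_NAN:      return "NaN";
        case FP_INFINITE: return v < 0.0 ? "-Infinity" : "Infinity";
        case FP_ZERO:     return "0";  // covers -0.0 as well
        default:
          std::snprintf(buffer, size, "%.17g", v);  // stand-in for dtoa
          return buffer;
      }
    }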
diff --git a/src/date.js b/src/date.js
index 9eb607c..1fb4897 100644
--- a/src/date.js
+++ b/src/date.js
@@ -605,7 +605,7 @@
// ECMA 262 - 15.9.5.5
function DateToLocaleString() {
- return DateToString.call(this);
+ return %_CallFunction(this, DateToString);
}
@@ -973,7 +973,7 @@
// do that either. Instead, we create a new function whose name
// property will return toGMTString.
function DateToGMTString() {
- return DateToUTCString.call(this);
+ return %_CallFunction(this, DateToUTCString);
}
diff --git a/src/debug.cc b/src/debug.cc
index 8ec77e7..d8201a1 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -835,7 +835,9 @@
// Expose the builtins object in the debugger context.
Handle<String> key = Factory::LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
- SetProperty(global, key, Handle<Object>(global->builtins()), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(global, key, Handle<Object>(global->builtins()), NONE),
+ false);
// Compile the JavaScript for the debugger in the debugger context.
Debugger::set_compiling_natives(true);
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index a3d2002..af2f42e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -663,7 +663,7 @@
case Translation::REGISTER: {
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+ PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
input_value,
*input_offset);
@@ -690,7 +690,7 @@
return false;
}
if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [esp + %d]\n",
+ PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
int32_value,
*input_offset);
@@ -706,7 +706,7 @@
int output_reg = iterator->Next();
double double_value = input_object->Number();
if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [esp + %d]\n",
+ PrintF(" %s <- %g (double) ; [sp + %d]\n",
DoubleRegister::AllocationIndexToString(output_reg),
double_value,
*input_offset);
@@ -720,7 +720,7 @@
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+ PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
output_offset,
input_value,
*input_offset);
@@ -749,7 +749,7 @@
return false;
}
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- %d (int32) ; [esp + %d]\n",
+ PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
int32_value,
*input_offset);
@@ -773,12 +773,12 @@
int32_t lower = static_cast<int32_t>(int_value);
int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
+ PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
output_offset + kUpperOffset,
upper,
double_value,
*input_offset);
- PrintF(" [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
+ PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
output_offset + kLowerOffset,
lower,
double_value,
@@ -810,6 +810,44 @@
}
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
+ // Iterate over the stack check table and patch every stack check
+ // call to an unconditional call to the replacement code.
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Address stack_check_cursor = unoptimized_code->instruction_start() +
+ unoptimized_code->stack_check_table_offset();
+ uint32_t table_length = Memory::uint32_at(stack_check_cursor);
+ stack_check_cursor += kIntSize;
+ for (uint32_t i = 0; i < table_length; ++i) {
+ uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
+ Address pc_after = unoptimized_code->instruction_start() + pc_offset;
+ PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
+ stack_check_cursor += 2 * kIntSize;
+ }
+}
+
+
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
+ // Iterate over the stack check table and revert the patched
+ // stack check calls.
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Address stack_check_cursor = unoptimized_code->instruction_start() +
+ unoptimized_code->stack_check_table_offset();
+ uint32_t table_length = Memory::uint32_at(stack_check_cursor);
+ stack_check_cursor += kIntSize;
+ for (uint32_t i = 0; i < table_length; ++i) {
+ uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
+ Address pc_after = unoptimized_code->instruction_start() + pc_offset;
+ RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
+ stack_check_cursor += 2 * kIntSize;
+ }
+}
+
+
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
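Both new loops walk the same in-code table layout: a uint32 entry count followed by that many pairs of uint32 words, the second word of each pair being the pc offset to patch. A sketch of a reader for that layout (stand-in types, no Memory:: helpers):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct StackCheckEntry { uint32_t ast_id; uint32_t pc_offset; };

    std::vector<StackCheckEntry> ReadStackCheckTable(const uint8_t* cursor) {
      uint32_t length;
      std::memcpy(&length, cursor, sizeof(length));
      cursor += sizeof(uint32_t);        // skip the count word
      std::vector<StackCheckEntry> entries(length);
      for (uint32_t i = 0; i < length; ++i) {
        std::memcpy(&entries[i], cursor, sizeof(StackCheckEntry));
        cursor += 2 * sizeof(uint32_t);  // advance past the pair
      }
      return entries;
    }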
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index f9bf280..1d4f477 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -128,14 +128,32 @@
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
- // Given the relocation info of a call to the stack check stub, patch the
- // code so as to go unconditionally to the on-stack replacement builtin
- // instead.
- static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
+ // The size in bytes of the code required at a lazy deopt patch site.
+ static int patch_size();
- // Given the relocation info of a call to the on-stack replacement
- // builtin, patch the code back to the original stack check code.
- static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+ // Patch all stack guard checks in the unoptimized code to
+ // unconditionally call replacement_code.
+ static void PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code);
+
+ // Patch stack guard check at instruction before pc_after in
+ // the unoptimized code to unconditionally call replacement_code.
+ static void PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code);
+
+ // Change all patched stack guard checks in the unoptimized code
+ // back to a normal stack guard check.
+ static void RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code);
+
+  // Change the patched stack guard check at the instruction before
+  // pc_after back to a normal stack guard check.
+ static void RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code);
~Deoptimizer();
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 194a299..243abf0 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -313,12 +313,12 @@
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
- ? static_cast<int>(code->safepoint_table_start())
+ ? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+ Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
}
byte* begin = code->instruction_start();
diff --git a/src/execution.cc b/src/execution.cc
index 11dacfe..f484d8d 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -403,6 +403,7 @@
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
+ const uintptr_t kLimitSize = FLAG_stack_size * KB;
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
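kLimitSize is now derived from the new --stack_size flag (given in KB) rather than a compile-time constant, and the limit is taken relative to the current C stack position. A sketch of the arithmetic, assuming a downward-growing stack:

    #include <cstdint>

    uintptr_t ComputeStackLimitSketch(uintptr_t stack_size_kb) {
      const uintptr_t kLimitSize = stack_size_kb * 1024;  // KB -> bytes
      uintptr_t local = 0;
      // The address of a local approximates the current top of the C stack.
      uintptr_t here = reinterpret_cast<uintptr_t>(&local);
      return here - kLimitSize;
    }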
diff --git a/src/execution.h b/src/execution.h
index af8ad9a..cb07807 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -243,8 +243,6 @@
static void EnableInterrupts();
static void DisableInterrupts();
- static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
-
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
index 73888fc..4d7a936 100644
--- a/src/extensions/experimental/experimental.gyp
+++ b/src/extensions/experimental/experimental.gyp
@@ -27,7 +27,10 @@
{
'variables': {
- 'icu_src_dir%': '',
+ # TODO(cira): Find out how to pass this value for an arbitrary embedder.
+ # Chromium sets it in common.gypi and force-includes that file for
+ # all sub-projects.
+ 'icu_src_dir%': '../../../../third_party/icu',
},
'targets': [
{
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index 22a1c91..a721ba5 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -39,8 +39,9 @@
I18NExtension* I18NExtension::extension_ = NULL;
// TODO(cira): maybe move JS code to a .js file and generate cc files from it?
+// TODO(cira): Remove v8 prefix from v8Locale once we have a stable API.
const char* const I18NExtension::kSource =
- "Locale = function(optLocale) {"
+ "v8Locale = function(optLocale) {"
" native function NativeJSLocale();"
" var properties = NativeJSLocale(optLocale);"
" this.locale = properties.locale;"
@@ -48,41 +49,41 @@
" this.script = properties.script;"
" this.region = properties.region;"
"};"
- "Locale.availableLocales = function() {"
+ "v8Locale.availableLocales = function() {"
" native function NativeJSAvailableLocales();"
" return NativeJSAvailableLocales();"
"};"
- "Locale.prototype.maximizedLocale = function() {"
+ "v8Locale.prototype.maximizedLocale = function() {"
" native function NativeJSMaximizedLocale();"
- " return new Locale(NativeJSMaximizedLocale(this.locale));"
+ " return new v8Locale(NativeJSMaximizedLocale(this.locale));"
"};"
- "Locale.prototype.minimizedLocale = function() {"
+ "v8Locale.prototype.minimizedLocale = function() {"
" native function NativeJSMinimizedLocale();"
- " return new Locale(NativeJSMinimizedLocale(this.locale));"
+ " return new v8Locale(NativeJSMinimizedLocale(this.locale));"
"};"
- "Locale.prototype.displayLocale_ = function(displayLocale) {"
+ "v8Locale.prototype.displayLocale_ = function(displayLocale) {"
" var result = this.locale;"
" if (displayLocale !== undefined) {"
" result = displayLocale.locale;"
" }"
" return result;"
"};"
- "Locale.prototype.displayLanguage = function(optDisplayLocale) {"
+ "v8Locale.prototype.displayLanguage = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayLanguage();"
" return NativeJSDisplayLanguage(this.locale, displayLocale);"
"};"
- "Locale.prototype.displayScript = function(optDisplayLocale) {"
+ "v8Locale.prototype.displayScript = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayScript();"
" return NativeJSDisplayScript(this.locale, displayLocale);"
"};"
- "Locale.prototype.displayRegion = function(optDisplayLocale) {"
+ "v8Locale.prototype.displayRegion = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayRegion();"
" return NativeJSDisplayRegion(this.locale, displayLocale);"
"};"
- "Locale.prototype.displayName = function(optDisplayLocale) {"
+ "v8Locale.prototype.displayName = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayName();"
" return NativeJSDisplayName(this.locale, displayLocale);"
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index b8f081c..63daa05 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,8 +40,12 @@
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
+ bool compact = false;
// All allocation spaces other than NEW_SPACE have the same effect.
- Heap::CollectAllGarbage(false);
+ if (args.Length() >= 1 && args[0]->IsBoolean()) {
+ compact = args[0]->BooleanValue();
+ }
+ Heap::CollectAllGarbage(compact);
return v8::Undefined();
}
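The extension now takes an optional boolean: gc(true) requests a compacting collection, while a missing or non-boolean first argument falls back to the non-compacting default. Sketched with a stand-in for v8::Arguments:

    // Stand-in for v8::Arguments, just enough to show the dispatch.
    struct ArgsSketch {
      int length;
      bool first_is_boolean;
      bool first_value;
    };

    bool ShouldCompactSketch(const ArgsSketch& args) {
      bool compact = false;  // default: non-compacting collection
      if (args.length >= 1 && args.first_is_boolean) {
        compact = args.first_value;  // gc(true) forces compaction
      }
      return compact;
    }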
diff --git a/src/factory.cc b/src/factory.cc
index 2bc878c..96c757a 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -334,6 +334,11 @@
}
+Handle<Map> Factory::GetPixelArrayElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->GetPixelArrayElementsMap(), Map);
+}
+
+
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
}
@@ -580,7 +585,9 @@
// Set function.prototype and give the prototype a constructor
// property that refers to the function.
SetPrototypeProperty(function, prototype);
- SetProperty(prototype, Factory::constructor_symbol(), function, DONT_ENUM);
+ // Currently safe because it is only invoked from Genesis.
+ SetLocalPropertyNoThrow(
+ prototype, Factory::constructor_symbol(), function, DONT_ENUM);
return function;
}
@@ -755,6 +762,24 @@
}
+Handle<JSMessageObject> Factory::NewJSMessageObject(
+ Handle<String> type,
+ Handle<JSArray> arguments,
+ int start_position,
+ int end_position,
+ Handle<Object> script,
+ Handle<Object> stack_trace,
+ Handle<Object> stack_frames) {
+ CALL_HEAP_FUNCTION(Heap::AllocateJSMessageObject(*type,
+ *arguments,
+ start_position,
+ end_position,
+ *script,
+ *stack_trace,
+ *stack_frames),
+ JSMessageObject);
+}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
SharedFunctionInfo);
diff --git a/src/factory.h b/src/factory.h
index a5e1591..7547f7c 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -196,6 +196,8 @@
static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+ static Handle<Map> GetPixelArrayElementsMap(Handle<Map> map);
+
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (eg, literals) are pretenured by the parser.
@@ -365,6 +367,15 @@
Handle<SerializedScopeInfo> scope_info);
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
+ static Handle<JSMessageObject> NewJSMessageObject(
+ Handle<String> type,
+ Handle<JSArray> arguments,
+ int start_position,
+ int end_position,
+ Handle<Object> script,
+ Handle<Object> stack_trace,
+ Handle<Object> stack_frames);
+
static Handle<NumberDictionary> DictionaryAtNumberPut(
Handle<NumberDictionary>,
uint32_t key,
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index fb892d6..57defdc 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -231,6 +231,10 @@
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
+// execution.cc
+DEFINE_int(stack_size, kPointerSize * 128,
+ "default size of stack region v8 is allowed to use (in KkBytes)")
+
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
"maximum length of function source code printed in a stack trace.")
@@ -301,6 +305,7 @@
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(strict_mode, true, "allow strict mode directives")
// rewriter.cc
DEFINE_bool(optimize_ast, true, "optimize the ast")
@@ -492,7 +497,6 @@
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
-DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
//
diff --git a/src/frames.cc b/src/frames.cc
index 7f28ff1..24ea8dd 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -695,7 +695,7 @@
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
- int deopt_index = AstNode::kNoNumber;
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
// BUG(3243555): Since we don't have a lazy-deopt registered at
@@ -793,7 +793,7 @@
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
*deopt_index = safepoint_entry.deoptimization_index();
- ASSERT(*deopt_index != AstNode::kNoNumber);
+ ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
return DeoptimizationInputData::cast(code->deoptimization_data());
}
@@ -803,7 +803,7 @@
ASSERT(functions->length() == 0);
ASSERT(is_optimized());
- int deopt_index = AstNode::kNoNumber;
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 9366e42..252fb92 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -304,11 +304,11 @@
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_allow_osr_at_loop_nesting_level(0);
- code->set_stack_check_table_start(table_offset);
+ code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (!code.is_null()) {
+ if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
@@ -913,7 +913,7 @@
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
- PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
__ bind(nested_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 0482ee8..655e560 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -531,6 +531,10 @@
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
+ bool is_strict() { return function()->strict_mode(); }
+ StrictModeFlag strict_mode_flag() {
+ return is_strict() ? kStrictMode : kNonStrictMode;
+ }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
@@ -541,7 +545,8 @@
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site
- // indicates no inlined smi code and emits a nop after the IC call.
+ // or a non-NULL patch_site that is not activated indicates no inlined smi
+ // code and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index b1782cb..c26ecf5 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -201,6 +201,7 @@
TYPE_SHLIB = 10,
TYPE_DYNSYM = 11,
TYPE_LOPROC = 0x70000000,
+ TYPE_X86_64_UNWIND = 0x70000001,
TYPE_HIPROC = 0x7fffffff,
TYPE_LOUSER = 0x80000000,
TYPE_HIUSER = 0xffffffff
@@ -639,26 +640,53 @@
class CodeDescription BASE_EMBEDDED {
public:
+
+#ifdef V8_TARGET_ARCH_X64
+ enum StackState {
+ POST_RBP_PUSH,
+ POST_RBP_SET,
+ POST_RBP_POP,
+ STACK_STATE_MAX
+ };
+#endif
+
CodeDescription(const char* name,
Code* code,
Handle<Script> script,
- GDBJITLineInfo* lineinfo)
- : name_(name), code_(code), script_(script), lineinfo_(lineinfo)
- { }
+ GDBJITLineInfo* lineinfo,
+ GDBJITInterface::CodeTag tag)
+ : name_(name),
+ code_(code),
+ script_(script),
+ lineinfo_(lineinfo),
+ tag_(tag) {
+ }
- const char* code_name() const {
+ const char* name() const {
return name_;
}
- uintptr_t code_size() const {
- return code_->instruction_end() - code_->instruction_start();
+ GDBJITLineInfo* lineinfo() const {
+ return lineinfo_;
}
- uintptr_t code_start() const {
- return (uintptr_t)code_->instruction_start();
+ GDBJITInterface::CodeTag tag() const {
+ return tag_;
}
- bool is_line_info_available() {
+ uintptr_t CodeStart() const {
+ return reinterpret_cast<uintptr_t>(code_->instruction_start());
+ }
+
+ uintptr_t CodeEnd() const {
+ return reinterpret_cast<uintptr_t>(code_->instruction_end());
+ }
+
+ uintptr_t CodeSize() const {
+ return CodeEnd() - CodeStart();
+ }
+
+ bool IsLineInfoAvailable() {
return !script_.is_null() &&
script_->source()->IsString() &&
script_->HasValidSource() &&
@@ -666,9 +694,19 @@
lineinfo_ != NULL;
}
- GDBJITLineInfo* lineinfo() const { return lineinfo_; }
+#ifdef V8_TARGET_ARCH_X64
+ uintptr_t GetStackStateStartAddress(StackState state) const {
+ ASSERT(state < STACK_STATE_MAX);
+ return stack_state_start_addresses_[state];
+ }
- SmartPointer<char> filename() {
+ void SetStackStateStartAddress(StackState state, uintptr_t addr) {
+ ASSERT(state < STACK_STATE_MAX);
+ stack_state_start_addresses_[state] = addr;
+ }
+#endif
+
+ SmartPointer<char> GetFilename() {
return String::cast(script_->name())->ToCString();
}
@@ -676,11 +714,16 @@
return GetScriptLineNumberSafe(script_, pos) + 1;
}
+
private:
const char* name_;
Code* code_;
Handle<Script> script_;
GDBJITLineInfo* lineinfo_;
+ GDBJITInterface::CodeTag tag_;
+#ifdef V8_TARGET_ARCH_X64
+ uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
+#endif
};
@@ -701,9 +744,9 @@
ELFSymbol::TYPE_FILE,
ELFSection::INDEX_ABSOLUTE));
- symtab->Add(ELFSymbol(desc->code_name(),
+ symtab->Add(ELFSymbol(desc->name(),
0,
- desc->code_size(),
+ desc->CodeSize(),
ELFSymbol::BIND_GLOBAL,
ELFSymbol::TYPE_FUNC,
text_section_index));
@@ -723,9 +766,9 @@
w->Write<uint8_t>(sizeof(intptr_t));
w->WriteULEB128(1); // Abbreviation code.
- w->WriteString(*desc_->filename());
- w->Write<intptr_t>(desc_->code_start());
- w->Write<intptr_t>(desc_->code_start() + desc_->code_size());
+ w->WriteString(*desc_->GetFilename());
+ w->Write<intptr_t>(desc_->CodeStart());
+ w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
size.set(static_cast<uint32_t>(w->position() - start));
return true;
@@ -829,7 +872,7 @@
w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
w->Write<uint8_t>(0); // Empty include_directories sequence.
- w->WriteString(*desc_->filename()); // File name.
+ w->WriteString(*desc_->GetFilename()); // File name.
w->WriteULEB128(0); // Current directory.
w->WriteULEB128(0); // Unknown modification time.
w->WriteULEB128(0); // Unknown file size.
@@ -837,7 +880,7 @@
prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));
WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
- w->Write<intptr_t>(desc_->code_start());
+ w->Write<intptr_t>(desc_->CodeStart());
intptr_t pc = 0;
intptr_t line = 1;
@@ -900,12 +943,238 @@
};
+#ifdef V8_TARGET_ARCH_X64
+
+
+class UnwindInfoSection : public ELFSection {
+ public:
+ explicit UnwindInfoSection(CodeDescription *desc);
+ virtual bool WriteBody(Writer *w);
+
+ int WriteCIE(Writer *w);
+ void WriteFDE(Writer *w, int);
+
+ void WriteFDEStateOnEntry(Writer *w);
+ void WriteFDEStateAfterRBPPush(Writer *w);
+ void WriteFDEStateAfterRBPSet(Writer *w);
+ void WriteFDEStateAfterRBPPop(Writer *w);
+
+ void WriteLength(Writer *w,
+ Writer::Slot<uint32_t>* length_slot,
+ int initial_position);
+
+ private:
+ CodeDescription *desc_;
+
+ // DWARF3 Specification, Table 7.23
+ enum CFIInstructions {
+ DW_CFA_ADVANCE_LOC = 0x40,
+ DW_CFA_OFFSET = 0x80,
+ DW_CFA_RESTORE = 0xC0,
+ DW_CFA_NOP = 0x00,
+ DW_CFA_SET_LOC = 0x01,
+ DW_CFA_ADVANCE_LOC1 = 0x02,
+ DW_CFA_ADVANCE_LOC2 = 0x03,
+ DW_CFA_ADVANCE_LOC4 = 0x04,
+ DW_CFA_OFFSET_EXTENDED = 0x05,
+ DW_CFA_RESTORE_EXTENDED = 0x06,
+ DW_CFA_UNDEFINED = 0x07,
+ DW_CFA_SAME_VALUE = 0x08,
+ DW_CFA_REGISTER = 0x09,
+ DW_CFA_REMEMBER_STATE = 0x0A,
+ DW_CFA_RESTORE_STATE = 0x0B,
+ DW_CFA_DEF_CFA = 0x0C,
+ DW_CFA_DEF_CFA_REGISTER = 0x0D,
+ DW_CFA_DEF_CFA_OFFSET = 0x0E,
+
+ DW_CFA_DEF_CFA_EXPRESSION = 0x0F,
+ DW_CFA_EXPRESSION = 0x10,
+ DW_CFA_OFFSET_EXTENDED_SF = 0x11,
+ DW_CFA_DEF_CFA_SF = 0x12,
+ DW_CFA_DEF_CFA_OFFSET_SF = 0x13,
+ DW_CFA_VAL_OFFSET = 0x14,
+ DW_CFA_VAL_OFFSET_SF = 0x15,
+ DW_CFA_VAL_EXPRESSION = 0x16
+ };
+
+ // System V ABI, AMD64 Supplement, Version 0.99.5, Figure 3.36
+ enum RegisterMapping {
+ // Only the relevant ones have been added to reduce clutter.
+ AMD64_RBP = 6,
+ AMD64_RSP = 7,
+ AMD64_RA = 16
+ };
+
+ enum CFIConstants {
+ CIE_ID = 0,
+ CIE_VERSION = 1,
+ CODE_ALIGN_FACTOR = 1,
+ DATA_ALIGN_FACTOR = 1,
+ RETURN_ADDRESS_REGISTER = AMD64_RA
+ };
+};
+
+
+void UnwindInfoSection::WriteLength(Writer *w,
+ Writer::Slot<uint32_t>* length_slot,
+ int initial_position) {
+ uint32_t align = (w->position() - initial_position) % kPointerSize;
+
+ if (align != 0) {
+ for (uint32_t i = 0; i < (kPointerSize - align); i++) {
+ w->Write<uint8_t>(DW_CFA_NOP);
+ }
+ }
+
+ ASSERT((w->position() - initial_position) % kPointerSize == 0);
+ length_slot->set(w->position() - initial_position);
+}
+
+
+UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
+ : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
+{ }
+
+int UnwindInfoSection::WriteCIE(Writer *w) {
+ Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
+ uint32_t cie_position = w->position();
+
+ // Write out the CIE header. Currently no 'common instructions' are
+ // emitted onto the CIE; every FDE has its own set of instructions.
+
+ w->Write<uint32_t>(CIE_ID);
+ w->Write<uint8_t>(CIE_VERSION);
+ w->Write<uint8_t>(0); // Null augmentation string.
+ w->WriteSLEB128(CODE_ALIGN_FACTOR);
+ w->WriteSLEB128(DATA_ALIGN_FACTOR);
+ w->Write<uint8_t>(RETURN_ADDRESS_REGISTER);
+
+ WriteLength(w, &cie_length_slot, cie_position);
+
+ return cie_position;
+}
+
+
+void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
+ // The only FDE for this function. The CFA is the current RBP.
+ Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
+ int fde_position = w->position();
+ w->Write<int32_t>(fde_position - cie_position + 4);
+
+ w->Write<uintptr_t>(desc_->CodeStart());
+ w->Write<uintptr_t>(desc_->CodeSize());
+
+ WriteFDEStateOnEntry(w);
+ WriteFDEStateAfterRBPPush(w);
+ WriteFDEStateAfterRBPSet(w);
+ WriteFDEStateAfterRBPPop(w);
+
+ WriteLength(w, &fde_length_slot, fde_position);
+}
+
+
+void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
+ // The first state, just after control has been transferred to the
+ // function.
+
+ // RBP for this function will be the value of RSP after pushing the RBP
+ // for the previous function. The previous RBP has not been pushed yet.
+ w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
+ w->WriteULEB128(AMD64_RSP);
+ w->WriteSLEB128(-kPointerSize);
+
+ // The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
+ // and hence omitted from the next states.
+ w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
+ w->WriteULEB128(AMD64_RA);
+ w->WriteSLEB128(StandardFrameConstants::kCallerPCOffset);
+
+ // The RBP of the previous function is still in RBP.
+ w->Write<uint8_t>(DW_CFA_SAME_VALUE);
+ w->WriteULEB128(AMD64_RBP);
+
+ // Last location described by this entry.
+ w->Write<uint8_t>(DW_CFA_SET_LOC);
+ w->Write<uint64_t>(
+ desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_PUSH));
+}
+
+
+void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
+ // The second state, just after RBP has been pushed.
+
+ // RBP / CFA for this function is now the current RSP, so just set the
+ // offset from the previous rule (from -8) to 0.
+ w->Write<uint8_t>(DW_CFA_DEF_CFA_OFFSET);
+ w->WriteULEB128(0);
+
+ // The previous RBP is stored at CFA + kCallerFPOffset. This is an invariant
+ // in this and the next state, and hence omitted in the next state.
+ w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
+ w->WriteULEB128(AMD64_RBP);
+ w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
+
+ // Last location described by this entry.
+ w->Write<uint8_t>(DW_CFA_SET_LOC);
+ w->Write<uint64_t>(
+ desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_SET));
+}
+
+
+void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
+ // The third state, after the RBP has been set.
+
+ // The CFA can now be set directly to RBP.
+ w->Write<uint8_t>(DW_CFA_DEF_CFA);
+ w->WriteULEB128(AMD64_RBP);
+ w->WriteULEB128(0);
+
+ // Last location described by this entry.
+ w->Write<uint8_t>(DW_CFA_SET_LOC);
+ w->Write<uint64_t>(
+ desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_POP));
+}
+
+
+void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
+ // The fourth (final) state. The RBP has been popped (just before issuing a
+ // return).
+
+ // The CFA is now calculated in the same way as in the first state.
+ w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
+ w->WriteULEB128(AMD64_RSP);
+ w->WriteSLEB128(-kPointerSize);
+
+ // The previous RBP is still stored at CFA + kCallerFPOffset.
+ w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
+ w->WriteULEB128(AMD64_RBP);
+ w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);
+
+ // Last location described by this entry.
+ w->Write<uint8_t>(DW_CFA_SET_LOC);
+ w->Write<uint64_t>(desc_->CodeEnd());
+}
+
+
+bool UnwindInfoSection::WriteBody(Writer *w) {
+ uint32_t cie_position = WriteCIE(w);
+ WriteFDE(w, cie_position);
+ return true;
+}
+
+
+#endif // V8_TARGET_ARCH_X64
+
+
static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
- if (desc->is_line_info_available()) {
+ if (desc->IsLineInfoAvailable()) {
elf->AddSection(new DebugInfoSection(desc));
elf->AddSection(new DebugAbbrevSection);
elf->AddSection(new DebugLineSection(desc));
}
+#ifdef V8_TARGET_ARCH_X64
+ elf->AddSection(new UnwindInfoSection(desc));
+#endif
}
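WriteLength encodes two DWARF CFI framing rules: each CIE/FDE body is padded to pointer-size alignment with DW_CFA_NOP, and the leading 32-bit length counts everything after itself. The same logic over a plain byte buffer:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    const uint8_t kDwCfaNop = 0x00;

    // Assumes the caller has already reserved a 4-byte length slot at
    // length_field_pos and appended the entry body after it.
    void PadAndPatchLength(std::vector<uint8_t>* buf,
                           std::size_t length_field_pos) {
      std::size_t body_start = length_field_pos + sizeof(uint32_t);
      while ((buf->size() - body_start) % sizeof(void*) != 0) {
        buf->push_back(kDwCfaNop);  // pad the instruction stream
      }
      uint32_t length = static_cast<uint32_t>(buf->size() - body_start);
      std::memcpy(&(*buf)[length_field_pos], &length, sizeof(length));
    }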
@@ -1005,9 +1274,9 @@
new FullHeaderELFSection(".text",
ELFSection::TYPE_NOBITS,
kCodeAlignment,
- desc->code_start(),
+ desc->CodeStart(),
0,
- desc->code_size(),
+ desc->CodeSize(),
ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
CreateSymbolsTable(desc, &elf, text_section_index);
@@ -1065,15 +1334,66 @@
if (!name.is_null()) {
SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, *script);
+ AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
} else {
- AddCode("", *code, *script);
+ AddCode("", *code, GDBJITInterface::FUNCTION, *script);
}
}
+static void AddUnwindInfo(CodeDescription *desc) {
+#ifdef V8_TARGET_ARCH_X64
+ if (desc->tag() == GDBJITInterface::FUNCTION) {
+ // To avoid propagating unwinding information through
+ // compilation pipeline we rely on function prologue
+ // and epilogue being the same for all code objects generated
+ // by the full code generator.
+ static const int kFramePointerPushOffset = 1;
+ static const int kFramePointerSetOffset = 4;
+ static const int kFramePointerPopOffset = -3;
+
+ uintptr_t frame_pointer_push_address =
+ desc->CodeStart() + kFramePointerPushOffset;
+
+ uintptr_t frame_pointer_set_address =
+ desc->CodeStart() + kFramePointerSetOffset;
+
+ uintptr_t frame_pointer_pop_address =
+ desc->CodeEnd() + kFramePointerPopOffset;
+
+#ifdef DEBUG
+ static const uint8_t kFramePointerPushInstruction = 0x48; // push ebp
+ static const uint16_t kFramePointerSetInstruction = 0x5756; // mov ebp, esp
+ static const uint8_t kFramePointerPopInstruction = 0xBE; // pop ebp
+
+ ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_push_address) ==
+ kFramePointerPushInstruction);
+ ASSERT(*reinterpret_cast<uint16_t*>(frame_pointer_set_address) ==
+ kFramePointerSetInstruction);
+ ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_pop_address) ==
+ kFramePointerPopInstruction);
+#endif
+
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
+ frame_pointer_push_address);
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
+ frame_pointer_set_address);
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
+ frame_pointer_pop_address);
+ } else {
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
+ desc->CodeStart());
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
+ desc->CodeStart());
+ desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP,
+ desc->CodeEnd());
+ }
+#endif // V8_TARGET_ARCH_X64
+}
+
void GDBJITInterface::AddCode(const char* name,
Code* code,
+ GDBJITInterface::CodeTag tag,
Script* script) {
if (!FLAG_gdbjit) return;
AssertNoAllocation no_gc;
@@ -1086,14 +1406,16 @@
code,
script != NULL ? Handle<Script>(script)
: Handle<Script>(),
- lineinfo);
+ lineinfo,
+ tag);
- if (!FLAG_gdbjit_full && !code_desc.is_line_info_available()) {
+ if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
entries.Remove(code, HashForCodeObject(code));
return;
}
+ AddUnwindInfo(&code_desc);
JITCodeEntry* entry = CreateELFObject(&code_desc);
ASSERT(!IsLineInfoTagged(entry));
@@ -1120,7 +1442,7 @@
builder.AddFormatted(": code object %p", static_cast<void*>(code));
}
- AddCode(builder.Finalize(), code);
+ AddCode(builder.Finalize(), code, tag);
}
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index 5d348b6..d46fec6 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -55,7 +55,8 @@
V(STUB) \
V(BUILTIN) \
V(SCRIPT) \
- V(EVAL)
+ V(EVAL) \
+ V(FUNCTION)
class GDBJITLineInfo : public Malloced {
public:
@@ -109,6 +110,7 @@
static void AddCode(const char* name,
Code* code,
+ CodeTag tag,
Script* script = NULL);
static void AddCode(Handle<String> name,
diff --git a/src/handles.cc b/src/handles.cc
index 461c3f5..d625d64 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -290,6 +290,17 @@
}
+void SetLocalPropertyNoThrow(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ ASSERT(!Top::has_pending_exception());
+ CHECK(!SetLocalPropertyIgnoreAttributes(
+ object, key, value, attributes).is_null());
+ CHECK(!Top::has_pending_exception());
+}
+
+
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@@ -808,6 +819,7 @@
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
+ ASSERT(!Top::has_pending_exception());
bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
@@ -873,7 +885,7 @@
int expected_additional_properties,
bool condition) {
object_ = object;
- if (condition && object_->HasFastProperties()) {
+ if (condition && object_->HasFastProperties() && !object->IsJSGlobalProxy()) {
// Normalize the properties of object to avoid n^2 behavior
// when extending the object with multiple properties. Indicate the number of
// properties to be added.
diff --git a/src/handles.h b/src/handles.h
index aa9d8b9..d95ca91 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -223,6 +223,13 @@
Handle<Object> value,
PropertyAttributes attributes);
+// Used to set local properties on objects we fully control
+// and which therefore have no accessors or the like.
+void SetLocalPropertyNoThrow(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes = NONE);
+
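Since attributes defaults to NONE, bootstrap-style callers stay terse. A hypothetical use, assuming the handles were obtained elsewhere:

    // object: Handle<JSObject>, key: Handle<String>, value: Handle<Object>.
    SetLocalPropertyNoThrow(object, key, value);             // attributes = NONE
    SetLocalPropertyNoThrow(object, key, value, DONT_ENUM);  // hidden from for-in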
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
diff --git a/src/hashmap.h b/src/hashmap.h
index 3b947be..2798988 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -49,7 +49,8 @@
typedef bool (*MatchFun) (void* key1, void* key2);
// Dummy constructor. This constructor doesn't set up the hash
- // map properly so don't use it unless you have good reason.
+ // map properly so don't use it unless you have good reason (e.g.,
+ // you know that the HashMap will never be used).
HashMap();
// initial_capacity is the size of the initial hash map;
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index dfda7c6..732d2f4 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -373,6 +373,7 @@
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
+ Heap::CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
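Forcing a full collection first means a full snapshot only contains objects that are still reachable. Embedders take snapshots the same way as before; a sketch of the 3.1-era API, recalled from memory rather than copied from the tree, so treat the exact signature as an assumption:

    v8::HandleScope scope;
    const v8::HeapSnapshot* snapshot =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("before-action"));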
@@ -808,7 +809,7 @@
void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj);
agg_snapshot_->js_cons_profile()->CollectStats(obj);
diff --git a/src/heap.cc b/src/heap.cc
index 32d751a..f88ebda 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -35,6 +35,7 @@
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
+#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -400,6 +401,8 @@
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsBeforeGC();
#endif
+
+ LiveObjectList::GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -412,6 +415,7 @@
}
void Heap::GarbageCollectionEpilogue() {
+ LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
ZapFromSpace();
@@ -1066,6 +1070,8 @@
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ LiveObjectList::UpdateReferencesForScavengeGC();
+
ASSERT(new_space_front == new_space_.top());
// Set age mark.
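After a scavenge every object in new space may have moved, so any side table holding raw addresses (here the LiveObjectList) has to chase the forwarding information. A minimal standalone model of that fix-up pass, with a std::map standing in for forwarding pointers:

    #include <cassert>
    #include <map>

    typedef std::map<void*, void*> ForwardingMap;

    void UpdateReferences(void** slots, int count, const ForwardingMap& moved) {
      for (int i = 0; i < count; ++i) {
        ForwardingMap::const_iterator it = moved.find(slots[i]);
        if (it != moved.end()) slots[i] = it->second;  // object was evacuated
      }
    }

    int main() {
      int old_a, new_a, b;
      void* table[2] = { &old_a, &b };
      ForwardingMap moved;
      moved[&old_a] = &new_a;
      UpdateReferences(table, 2, moved);
      assert(table[0] == &new_a && table[1] == &b);
      return 0;
    }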
@@ -1820,6 +1826,12 @@
}
set_shared_function_info_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
+ JSMessageObject::kSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_message_object_map(Map::cast(obj));
+
ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
return true;
}
@@ -1931,6 +1943,14 @@
}
+#if V8_TARGET_ARCH_ARM
+void Heap::CreateDirectCEntryStub() {
+ DirectCEntryStub stub;
+ set_direct_c_entry_code(*stub.GetCode());
+}
+#endif
+
+
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
@@ -1951,6 +1971,9 @@
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Heap::CreateRegExpCEntryStub();
#endif
+#if V8_TARGET_ARCH_ARM
+ Heap::CreateDirectCEntryStub();
+#endif
}
@@ -2323,6 +2346,32 @@
}
+MaybeObject* Heap::AllocateJSMessageObject(String* type,
+ JSArray* arguments,
+ int start_position,
+ int end_position,
+ Object* script,
+ Object* stack_trace,
+ Object* stack_frames) {
+ Object* result;
+ { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ JSMessageObject* message = JSMessageObject::cast(result);
+ message->set_properties(Heap::empty_fixed_array());
+ message->set_elements(Heap::empty_fixed_array());
+ message->set_type(type);
+ message->set_arguments(arguments);
+ message->set_start_position(start_position);
+ message->set_end_position(end_position);
+ message->set_script(script);
+ message->set_stack_trace(stack_trace);
+ message->set_stack_frames(stack_frames);
+ return result;
+}
+
+
+
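AllocateJSMessageObject follows the MaybeObject discipline used throughout heap.cc: every allocation can fail, the result must be unwrapped before use, and all fields are initialized before the object escapes. A self-contained model of the unwrap-or-propagate step (toy types, not V8's tagged pointers):

    #include <cassert>
    #include <cstddef>

    struct Object { int type; };

    // NULL models Failure::RetryAfterGC.
    struct MaybeObject {
      Object* obj;
      bool ToObject(Object** out) {
        if (obj == NULL) return false;
        *out = obj;
        return true;
      }
    };

    static Object storage;
    static bool simulate_oom = false;

    MaybeObject Allocate() {
      MaybeObject m = { simulate_oom ? NULL : &storage };
      return m;
    }

    MaybeObject AllocateMessage(int type) {
      Object* result;
      { MaybeObject maybe_result = Allocate();
        if (!maybe_result.ToObject(&result)) return maybe_result;  // propagate
      }
      result->type = type;  // initialize before the object escapes
      MaybeObject ok = { result };
      return ok;
    }

    int main() {
      Object* msg;
      assert(AllocateMessage(7).ToObject(&msg) && msg->type == 7);
      simulate_oom = true;
      assert(!AllocateMessage(7).ToObject(&msg));  // caller retries after GC
      return 0;
    }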
// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
// This makes use of the unsigned wraparound.
@@ -4079,7 +4128,7 @@
#ifdef DEBUG
void Heap::ZapFromSpace() {
- ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
+ ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
for (Address a = new_space_.FromSpaceLow();
a < new_space_.FromSpaceHigh();
a += kPointerSize) {
diff --git a/src/heap.h b/src/heap.h
index 0d79081..dcd813b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -94,6 +94,7 @@
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
V(Map, proxy_map, ProxyMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
@@ -121,7 +122,12 @@
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- V(Code, re_c_entry_code, RegExpCEntryCode)
+ V(Code, re_c_entry_code, RegExpCEntryCode) \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
+#elif V8_TARGET_ARCH_ARM
+#define STRONG_ROOT_LIST(V) \
+ UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+ V(Code, direct_c_entry_code, DirectCEntryCode)
#else
#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
#endif
@@ -177,6 +183,7 @@
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
+ V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
@@ -203,7 +210,10 @@
V(zero_symbol, "0") \
V(global_eval_symbol, "GlobalEval") \
V(identity_hash_symbol, "v8::IdentityHash") \
- V(closure_symbol, "(closure)")
+ V(closure_symbol, "(closure)") \
+ V(use_strict, "use strict") \
+ V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \
+ V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray")
// Forward declarations.
@@ -625,6 +635,19 @@
// Please note this does not perform a garbage collection.
MUST_USE_RESULT static MaybeObject* AllocateSharedFunctionInfo(Object* name);
+ // Allocates a new JSMessageObject object.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note that this does not perform a garbage collection.
+ MUST_USE_RESULT static MaybeObject* AllocateJSMessageObject(
+ String* type,
+ JSArray* arguments,
+ int start_position,
+ int end_position,
+ Object* script,
+ Object* stack_trace,
+ Object* stack_frames);
+
// Allocates a new cons string object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1302,12 +1325,13 @@
static bool CreateInitialMaps();
static bool CreateInitialObjects();
- // These four Create*EntryStub functions are here and forced to not be inlined
+ // These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
NO_INLINE(static void CreateCEntryStub());
NO_INLINE(static void CreateJSEntryStub());
NO_INLINE(static void CreateJSConstructEntryStub());
NO_INLINE(static void CreateRegExpCEntryStub());
+ NO_INLINE(static void CreateDirectCEntryStub());
static void CreateFixedStubs();
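The root and symbol lists above are X-macros: a single list, with each client defining V to extract the column it needs (storage, accessors, enum values). A minimal standalone model of the pattern:

    #include <cstdio>

    #define DEMO_ROOT_LIST(V) \
      V(int, answer, Answer)  \
      V(double, ratio, Ratio)

    struct DemoHeap {
    #define DECLARE_FIELD(type, name, camel) static type name##_;
      DEMO_ROOT_LIST(DECLARE_FIELD)
    #undef DECLARE_FIELD
    #define DECLARE_GETTER(type, name, camel) static type name() { return name##_; }
      DEMO_ROOT_LIST(DECLARE_GETTER)
    #undef DECLARE_GETTER
    };

    int DemoHeap::answer_ = 42;
    double DemoHeap::ratio_ = 0.5;

    int main() {
      printf("%d %.1f\n", DemoHeap::answer(), DemoHeap::ratio());
      return 0;
    }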
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index b13bb0c..16100e4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -253,6 +253,7 @@
if (other->opcode() != opcode()) return false;
if (!other->representation().Equals(representation())) return false;
if (!other->type_.Equals(type_)) return false;
+ if (other->flags() != flags()) return false;
if (OperandCount() != other->OperandCount()) return false;
for (int i = 0; i < OperandCount(); ++i) {
if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
@@ -280,6 +281,33 @@
}
+void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
+ if (index < 2) {
+ operands_[index] = value;
+ } else {
+ context_ = value;
+ }
+}
+
+
+void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
+ if (index < 3) {
+ operands_[index] = value;
+ } else {
+ context_ = value;
+ }
+}
+
+
+void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) {
+ if (index < 2) {
+ operands_[index] = value;
+ } else {
+ context_ = value;
+ }
+}
+
+
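These overrides map one logical operand index onto split physical storage: low indices go to the fixed operand vector, the final index to the dedicated context_ field. A standalone model of the dispatch (toy types):

    #include <cassert>

    struct SplitStorageNode {
      void* operands_[2];
      void* context_;
      void InternalSetOperandAt(int index, void* value) {
        if (index < 2) {
          operands_[index] = value;
        } else {
          context_ = value;  // index 2 aliases the context field
        }
      }
      void* OperandAt(int index) const {
        return index < 2 ? operands_[index] : context_;
      }
    };

    int main() {
      SplitStorageNode n;
      int a, b, ctx;
      n.InternalSetOperandAt(0, &a);
      n.InternalSetOperandAt(1, &b);
      n.InternalSetOperandAt(2, &ctx);
      assert(n.OperandAt(2) == &ctx && n.context_ == &ctx);
      return 0;
    }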
void HValue::ReplaceAndDelete(HValue* other) {
ReplaceValue(other);
Delete();
@@ -490,7 +518,7 @@
#ifdef DEBUG
-void HInstruction::Verify() const {
+void HInstruction::Verify() {
// Verify that input operands are defined before use.
HBasicBlock* cur_block = block();
for (int i = 0; i < OperandCount(); ++i) {
@@ -517,30 +545,73 @@
if (HasSideEffects() && !IsOsrEntry()) {
ASSERT(next()->IsSimulate());
}
+
+ // Verify that instructions that can be eliminated by GVN have overridden
+ // HValue::DataEquals. The default implementation is UNREACHABLE. We
+ // don't actually care whether DataEquals returns true or false here.
+ if (CheckFlag(kUseGVN)) DataEquals(this);
}
#endif
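Together with the flags check added to HValue::Equals above, this enforces the GVN congruence contract: two instructions may only be merged when opcode, flags and the subclass-specific payload all agree, and any kUseGVN instruction must supply its own DataEquals. A compilable miniature of the contract (toy types):

    #include <cassert>

    struct NodeModel {
      int opcode;
      int flags;
      int slot;  // subclass payload, compared by DataEquals
      bool DataEquals(const NodeModel* other) const { return slot == other->slot; }
      bool Equals(const NodeModel* other) const {
        return opcode == other->opcode &&
               flags == other->flags &&  // the newly added check
               DataEquals(other);
      }
    };

    int main() {
      NodeModel a = { 1, 0, 4 };
      NodeModel b = { 1, 0, 4 };
      NodeModel c = { 1, 0, 5 };
      assert(a.Equals(&b));   // congruent: GVN may reuse a for b
      assert(!a.Equals(&c));  // different payload: keep both
      return 0;
    }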
-HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
- for (int i = 0; i < count; ++i) arguments_[i] = NULL;
- set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+void HCall::PrintDataTo(StringStream* stream) const {
+ stream->Add("#%d", argument_count());
}
-void HCall::PrintDataTo(StringStream* stream) const {
- stream->Add("(");
- for (int i = 0; i < arguments_.length(); ++i) {
- if (i != 0) stream->Add(", ");
- arguments_.at(i)->PrintNameTo(stream);
+void HUnaryCall::PrintDataTo(StringStream* stream) const {
+ value()->PrintNameTo(stream);
+ stream->Add(" ");
+ HCall::PrintDataTo(stream);
+}
+
+
+void HBinaryCall::PrintDataTo(StringStream* stream) const {
+ first()->PrintNameTo(stream);
+ stream->Add(" ");
+ second()->PrintNameTo(stream);
+ stream->Add(" ");
+ HCall::PrintDataTo(stream);
+}
+
+
+void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
+ if (IsApplyFunction()) {
+ stream->Add("optimized apply ");
+ } else {
+ stream->Add("%o ", function()->shared()->DebugName());
}
- stream->Add(")");
+ HCall::PrintDataTo(stream);
+}
+
+
+void HCallNamed::PrintDataTo(StringStream* stream) const {
+ stream->Add("%o ", *name());
+ HUnaryCall::PrintDataTo(stream);
+}
+
+
+void HCallGlobal::PrintDataTo(StringStream* stream) const {
+ stream->Add("%o ", *name());
+ HUnaryCall::PrintDataTo(stream);
+}
+
+
+void HCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+ stream->Add("o ", target()->shared()->DebugName());
+ HCall::PrintDataTo(stream);
+}
+
+
+void HCallRuntime::PrintDataTo(StringStream* stream) const {
+ stream->Add("%o ", *name());
+ HCall::PrintDataTo(stream);
}
void HClassOfTest::PrintDataTo(StringStream* stream) const {
stream->Add("class_of_test(");
- value()->PrintTo(stream);
+ value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
}
@@ -554,50 +625,29 @@
}
-void HCall::SetArgumentAt(int index, HPushArgument* push_argument) {
- push_argument->set_argument_index(index);
- SetOperandAt(index, push_argument);
-}
-
-
-void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
- if (IsApplyFunction()) {
- stream->Add("SPECIAL function: apply");
- } else {
- stream->Add("%s", *(function()->shared()->DebugName()->ToCString()));
+void HControlInstruction::PrintDataTo(StringStream* stream) const {
+ if (FirstSuccessor() != NULL) {
+ int first_id = FirstSuccessor()->block_id();
+ if (SecondSuccessor() == NULL) {
+ stream->Add(" B%d", first_id);
+ } else {
+ int second_id = SecondSuccessor()->block_id();
+ stream->Add(" goto (B%d, B%d)", first_id, second_id);
+ }
}
- HCall::PrintDataTo(stream);
}
-void HBranch::PrintDataTo(StringStream* stream) const {
- int first_id = FirstSuccessor()->block_id();
- int second_id = SecondSuccessor()->block_id();
- stream->Add("on ");
+void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
- stream->Add(" (B%d, B%d)", first_id, second_id);
+ HControlInstruction::PrintDataTo(stream);
}
-void HCompareMapAndBranch::PrintDataTo(StringStream* stream) const {
- stream->Add("on ");
+void HCompareMap::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
-}
-
-
-void HGoto::PrintDataTo(StringStream* stream) const {
- stream->Add("B%d", FirstSuccessor()->block_id());
-}
-
-
-void HReturn::PrintDataTo(StringStream* stream) const {
- value()->PrintNameTo(stream);
-}
-
-
-void HThrow::PrintDataTo(StringStream* stream) const {
- value()->PrintNameTo(stream);
+ HControlInstruction::PrintDataTo(stream);
}
@@ -662,14 +712,6 @@
}
-void HPushArgument::PrintDataTo(StringStream* stream) const {
- HUnaryOperation::PrintDataTo(stream);
- if (argument_index() != -1) {
- stream->Add(" [%d]", argument_index_);
- }
-}
-
-
void HChange::PrintDataTo(StringStream* stream) const {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
@@ -698,42 +740,19 @@
}
-void HCallKeyed::PrintDataTo(StringStream* stream) const {
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("](");
- for (int i = 1; i < arguments_.length(); ++i) {
- if (i != 1) stream->Add(", ");
- arguments_.at(i)->PrintNameTo(stream);
- }
- stream->Add(")");
-}
-
-
-void HCallNamed::PrintDataTo(StringStream* stream) const {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s ", *name_string);
- HCall::PrintDataTo(stream);
-}
-
-
-void HCallGlobal::PrintDataTo(StringStream* stream) const {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s ", *name_string);
- HCall::PrintDataTo(stream);
-}
-
-
-void HCallRuntime::PrintDataTo(StringStream* stream) const {
- SmartPointer<char> name_string = name()->ToCString();
- stream->Add("%s ", *name_string);
- HCall::PrintDataTo(stream);
-}
-
void HCallStub::PrintDataTo(StringStream* stream) const {
- stream->Add("%s(%d)",
- CodeStub::MajorName(major_key_, false),
- argument_count_);
+ stream->Add("%s ",
+ CodeStub::MajorName(major_key_, false));
+ HUnaryCall::PrintDataTo(stream);
+}
+
+
+void HInstanceOf::PrintDataTo(StringStream* stream) const {
+ left()->PrintNameTo(stream);
+ stream->Add(" ");
+ right()->PrintNameTo(stream);
+ stream->Add(" ");
+ context()->PrintNameTo(stream);
}
@@ -899,17 +918,6 @@
}
-bool HPhi::HasReceiverOperand() {
- for (int i = 0; i < OperandCount(); i++) {
- if (OperandAt(i)->IsParameter() &&
- HParameter::cast(OperandAt(i))->index() == 0) {
- return true;
- }
- }
- return false;
-}
-
-
HValue* HPhi::GetRedundantReplacement() const {
HValue* candidate = NULL;
int count = OperandCount();
@@ -1124,10 +1132,10 @@
void HCompare::SetInputRepresentation(Representation r) {
input_representation_ = r;
if (r.IsTagged()) {
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
ClearFlag(kUseGVN);
} else {
- ClearFlagMask(AllSideEffects());
+ ClearAllSideEffects();
SetFlag(kUseGVN);
}
}
@@ -1152,6 +1160,14 @@
}
+void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
+ external_pointer()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("]");
+}
+
+
void HStoreNamed::PrintDataTo(StringStream* stream) const {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -1192,7 +1208,15 @@
void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ value()->PrintNameTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
+ context()->PrintNameTo(stream);
+ stream->Add("[%d] = ", slot_index());
+ value()->PrintNameTo(stream);
}
@@ -1255,6 +1279,11 @@
}
+HType HBitwiseBinaryOperation::CalculateInferredType() const {
+ return HType::TaggedNumber();
+}
+
+
HType HArithmeticBinaryOperation::CalculateInferredType() const {
return HType::TaggedNumber();
}
@@ -1388,7 +1417,7 @@
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
-void HPhi::Verify() const {
+void HPhi::Verify() {
ASSERT(OperandCount() == block()->predecessors()->length());
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
@@ -1400,49 +1429,49 @@
}
-void HSimulate::Verify() const {
+void HSimulate::Verify() {
HInstruction::Verify();
ASSERT(HasAstId());
}
-void HBoundsCheck::Verify() const {
+void HBoundsCheck::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckSmi::Verify() const {
+void HCheckSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckNonSmi::Verify() const {
+void HCheckNonSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckInstanceType::Verify() const {
+void HCheckInstanceType::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckMap::Verify() const {
+void HCheckMap::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckFunction::Verify() const {
+void HCheckFunction::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
-void HCheckPrototypeMaps::Verify() const {
+void HCheckPrototypeMaps::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index eebec5a..a0d932f 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -46,112 +46,9 @@
class LChunkBuilder;
-// Type hierarchy:
-//
-// HValue
-// HInstruction
-// HAccessArgumentsAt
-// HApplyArguments
-// HArgumentsElements
-// HArgumentsLength
-// HArgumentsObject
-// HBinaryOperation
-// HArithmeticBinaryOperation
-// HAdd
-// HDiv
-// HMod
-// HMul
-// HSub
-// HBitwiseBinaryOperation
-// HBitAnd
-// HBitOr
-// HBitXor
-// HSar
-// HShl
-// HShr
-// HBoundsCheck
-// HCompare
-// HCompareJSObjectEq
-// HInstanceOf
-// HInstanceOfKnownGlobal
-// HLoadKeyed
-// HLoadKeyedFastElement
-// HLoadKeyedGeneric
-// HPower
-// HStoreNamed
-// HStoreNamedField
-// HStoreNamedGeneric
-// HBlockEntry
-// HCall
-// HCallConstantFunction
-// HCallFunction
-// HCallGlobal
-// HCallKeyed
-// HCallKnownGlobal
-// HCallNamed
-// HCallNew
-// HCallRuntime
-// HCallStub
-// HCheckPrototypeMaps
-// HConstant
-// HControlInstruction
-// HDeoptimize
-// HGoto
-// HUnaryControlInstruction
-// HBranch
-// HCompareMapAndBranch
-// HReturn
-// HThrow
-// HEnterInlined
-// HFunctionLiteral
-// HGlobalObject
-// HGlobalReceiver
-// HLeaveInlined
-// HLoadContextSlot
-// HLoadGlobal
-// HMaterializedLiteral
-// HArrayLiteral
-// HObjectLiteral
-// HRegExpLiteral
-// HOsrEntry
-// HParameter
-// HSimulate
-// HStackCheck
-// HStoreKeyed
-// HStoreKeyedFastElement
-// HStoreKeyedGeneric
-// HUnaryOperation
-// HBitNot
-// HChange
-// HCheckFunction
-// HCheckInstanceType
-// HCheckMap
-// HCheckNonSmi
-// HCheckSmi
-// HDeleteProperty
-// HFixedArrayLength
-// HJSArrayLength
-// HLoadElements
-// HTypeofIs
-// HLoadNamedField
-// HLoadNamedGeneric
-// HLoadFunctionPrototype
-// HPushArgument
-// HTypeof
-// HUnaryMathOperation
-// HUnaryPredicate
-// HClassOfTest
-// HHasCachedArrayIndex
-// HHasInstanceType
-// HIsNull
-// HIsObject
-// HIsSmi
-// HValueOf
-// HUnknownOSRValue
-// HPhi
-
#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
+ V(BinaryCall) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(Call) \
@@ -162,12 +59,14 @@
V(Phi) \
V(StoreKeyed) \
V(StoreNamed) \
+ V(UnaryCall) \
V(UnaryControlInstruction) \
V(UnaryOperation) \
HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(ApplyArguments) \
@@ -181,7 +80,6 @@
V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
- V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
@@ -200,8 +98,9 @@
V(CheckSmi) \
V(Compare) \
V(CompareJSObjectEq) \
- V(CompareMapAndBranch) \
+ V(CompareMap) \
V(Constant) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
@@ -216,6 +115,7 @@
V(IsNull) \
V(IsObject) \
V(IsSmi) \
+ V(IsConstructCall) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
V(JSArrayLength) \
@@ -223,17 +123,21 @@
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
+ V(LoadPixelArrayElement) \
+ V(LoadPixelArrayExternalPointer) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
+ V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -243,12 +147,16 @@
V(Shr) \
V(Simulate) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(Sub) \
+ V(Test) \
V(Throw) \
V(Typeof) \
V(TypeofIs) \
@@ -261,10 +169,11 @@
V(InobjectFields) \
V(BackingStoreFields) \
V(ArrayElements) \
+ V(PixelArrayElements) \
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
- V(FunctionPrototypes) \
+ V(ContextSlots) \
V(OsrEntries)
#define DECLARE_INSTRUCTION(type) \
@@ -386,6 +295,7 @@
kTagged,
kDouble,
kInteger32,
+ kExternal,
kNumRepresentations
};
@@ -395,6 +305,7 @@
static Representation Tagged() { return Representation(kTagged); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
+ static Representation External() { return Representation(kExternal); }
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@@ -405,6 +316,7 @@
bool IsTagged() const { return kind_ == kTagged; }
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsDouble() const { return kind_ == kDouble; }
+ bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
return kind_ == kInteger32 || kind_ == kDouble;
}
@@ -569,11 +481,6 @@
return flags << kChangesToDependsFlagsLeftShift;
}
- // A flag mask to mark an instruction as having arbitrary side effects.
- static int AllSideEffects() {
- return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
- }
-
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -632,9 +539,6 @@
return NULL;
}
- bool HasSideEffects() const {
- return (flags_ & AllSideEffects()) != 0;
- }
bool IsDefinedAfter(HBasicBlock* other) const;
// Operands.
@@ -657,12 +561,13 @@
void Delete();
int flags() const { return flags_; }
- void SetFlagMask(int mask) { flags_ |= mask; }
- void SetFlag(Flag f) { SetFlagMask(1 << f); }
- void ClearFlagMask(int mask) { flags_ &= ~mask; }
- void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
- bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
- bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
+ void SetFlag(Flag f) { flags_ |= (1 << f); }
+ void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
+ bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+
+ void SetAllSideEffects() { flags_ |= AllSideEffects(); }
+ void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
+ bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
Range* range() const { return range_; }
bool HasRange() const { return range_ != NULL; }
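The generic mask setters are gone; flags are now set and cleared one bit at a time, with dedicated helpers for the side-effect mask. The arithmetic is plain bit twiddling, shown here with an invented three-flag enum:

    #include <cassert>

    enum Flag { kUseGVN, kChangesOsrEntries, kChangesGlobalVars };

    struct FlagsModel {
      int flags_;
      FlagsModel() : flags_(0) {}
      void SetFlag(Flag f)         { flags_ |=  (1 << f); }
      void ClearFlag(Flag f)       { flags_ &= ~(1 << f); }
      bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }

      // "All side effects" is every Changes* bit except OSR entries.
      static int AllSideEffects() {
        int changes_mask = (1 << kChangesOsrEntries) | (1 << kChangesGlobalVars);
        return changes_mask & ~(1 << kChangesOsrEntries);
      }
      void SetAllSideEffects()    { flags_ |= AllSideEffects(); }
      bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
    };

    int main() {
      FlagsModel m;
      m.SetAllSideEffects();
      assert(m.HasSideEffects() && m.CheckFlag(kChangesGlobalVars));
      assert(!m.CheckFlag(kChangesOsrEntries));  // excluded from the mask
      m.SetFlag(kUseGVN);
      assert(m.CheckFlag(kUseGVN));
      return 0;
    }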
@@ -706,15 +611,17 @@
virtual HType CalculateInferredType() const;
- // Helper for type conversions used by normal and phi instructions.
- void InsertInputConversion(HInstruction* previous, int index, HType type);
-
#ifdef DEBUG
- virtual void Verify() const = 0;
+ virtual void Verify() = 0;
#endif
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ // This function must be overridden for instructions with flag kUseGVN, to
+ // compare the non-Operand parts of the instruction.
+ virtual bool DataEquals(HValue* other) const {
+ UNREACHABLE();
+ return false;
+ }
virtual void RepresentationChanged(Representation to) { }
virtual Range* InferRange();
virtual void DeleteFromGraph() = 0;
@@ -731,6 +638,11 @@
}
private:
+ // A flag mask to mark an instruction as having arbitrary side effects.
+ static int AllSideEffects() {
+ return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+ }
+
void InternalReplaceAtUse(HValue* use, HValue* other);
void RegisterUse(int index, HValue* new_value);
@@ -770,7 +682,7 @@
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
// Returns whether this is some kind of deoptimizing check
@@ -811,44 +723,55 @@
class HControlInstruction: public HInstruction {
public:
- virtual HBasicBlock* FirstSuccessor() const { return NULL; }
- virtual HBasicBlock* SecondSuccessor() const { return NULL; }
+ HControlInstruction(HBasicBlock* first, HBasicBlock* second)
+ : first_successor_(first), second_successor_(second) {
+ }
+
+ HBasicBlock* FirstSuccessor() const { return first_successor_; }
+ HBasicBlock* SecondSuccessor() const { return second_successor_; }
+
+ virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(ControlInstruction)
+
+ private:
+ HBasicBlock* first_successor_;
+ HBasicBlock* second_successor_;
};
class HDeoptimize: public HControlInstruction {
public:
+ HDeoptimize() : HControlInstruction(NULL, NULL) { }
+
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
class HGoto: public HControlInstruction {
public:
- explicit HGoto(HBasicBlock* destination)
- : destination_(destination),
- include_stack_check_(false) {}
+ explicit HGoto(HBasicBlock* target)
+ : HControlInstruction(target, NULL), include_stack_check_(false) {
+ }
- virtual HBasicBlock* FirstSuccessor() const { return destination_; }
void set_include_stack_check(bool include_stack_check) {
include_stack_check_ = include_stack_check;
}
bool include_stack_check() const { return include_stack_check_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
private:
- HBasicBlock* destination_;
bool include_stack_check_;
};
class HUnaryControlInstruction: public HControlInstruction {
public:
- explicit HUnaryControlInstruction(HValue* value) {
+ explicit HUnaryControlInstruction(HValue* value,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target)
+ : HControlInstruction(true_target, false_target) {
SetOperandAt(0, value);
}
@@ -856,6 +779,8 @@
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) const;
+
HValue* value() const { return OperandAt(0); }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
@@ -872,85 +797,60 @@
};
-class HBranch: public HUnaryControlInstruction {
+class HTest: public HUnaryControlInstruction {
public:
- HBranch(HBasicBlock* true_destination,
- HBasicBlock* false_destination,
- HValue* boolean_value)
- : HUnaryControlInstruction(boolean_value),
- true_destination_(true_destination),
- false_destination_(false_destination) {
- ASSERT(true_destination != NULL && false_destination != NULL);
+ HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ ASSERT(true_target != NULL && false_target != NULL);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
- virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
- virtual void PrintDataTo(StringStream* stream) const;
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-
- private:
- HBasicBlock* true_destination_;
- HBasicBlock* false_destination_;
+ DECLARE_CONCRETE_INSTRUCTION(Test, "test")
};
-class HCompareMapAndBranch: public HUnaryControlInstruction {
+class HCompareMap: public HUnaryControlInstruction {
public:
- HCompareMapAndBranch(HValue* result,
- Handle<Map> map,
- HBasicBlock* true_destination,
- HBasicBlock* false_destination)
- : HUnaryControlInstruction(result),
- map_(map),
- true_destination_(true_destination),
- false_destination_(false_destination) {
- ASSERT(true_destination != NULL);
- ASSERT(false_destination != NULL);
+ HCompareMap(HValue* value,
+ Handle<Map> map,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ map_(map) {
+ ASSERT(true_target != NULL);
+ ASSERT(false_target != NULL);
ASSERT(!map.is_null());
}
- virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
- virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
- HBasicBlock* true_destination() const { return true_destination_; }
- HBasicBlock* false_destination() const { return false_destination_; }
-
virtual void PrintDataTo(StringStream* stream) const;
Handle<Map> map() const { return map_; }
- DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch")
+ DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
private:
Handle<Map> map_;
- HBasicBlock* true_destination_;
- HBasicBlock* false_destination_;
};
class HReturn: public HUnaryControlInstruction {
public:
- explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { }
-
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit HReturn(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class HThrow: public HUnaryControlInstruction {
+class HAbnormalExit: public HControlInstruction {
public:
- explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { }
+ HAbnormalExit() : HControlInstruction(NULL, NULL) { }
- virtual void PrintDataTo(StringStream* stream) const;
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+ DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
};
@@ -977,6 +877,20 @@
};
+class HThrow: public HUnaryOperation {
+ public:
+ explicit HThrow(HValue* value) : HUnaryOperation(value) {
+ SetAllSideEffects();
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
class HChange: public HUnaryOperation {
public:
HChange(HValue* value,
@@ -1070,7 +984,7 @@
DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
protected:
@@ -1100,7 +1014,7 @@
public:
HStackCheck() { }
- DECLARE_CONCRETE_INSTRUCTION(Throw, "stack_check")
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
};
@@ -1133,86 +1047,151 @@
class HPushArgument: public HUnaryOperation {
public:
- explicit HPushArgument(HValue* value)
- : HUnaryOperation(value), argument_index_(-1) {
- set_representation(Representation::Tagged());
- }
+ explicit HPushArgument(HValue* value) : HUnaryOperation(value) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
HValue* argument() const { return OperandAt(0); }
- int argument_index() const { return argument_index_; }
- void set_argument_index(int index) {
- ASSERT(argument_index_ == -1 || index == argument_index_);
- argument_index_ = index;
- }
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
-
- private:
- int argument_index_;
};
-class HGlobalObject: public HInstruction {
+class HContext: public HInstruction {
public:
- HGlobalObject() {
+ HContext() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HOuterContext: public HUnaryOperation {
+ public:
+ explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HGlobalObject: public HUnaryOperation {
+ public:
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
-class HGlobalReceiver: public HInstruction {
+class HGlobalReceiver: public HUnaryOperation {
public:
- HGlobalReceiver() {
+ explicit HGlobalReceiver(HValue* global_object)
+ : HUnaryOperation(global_object) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
}
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
class HCall: public HInstruction {
public:
- // Construct a call with uninitialized arguments. The argument count
- // includes the receiver.
- explicit HCall(int count);
+ // The argument count includes the receiver.
+ explicit HCall(int argument_count) : argument_count_(argument_count) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
virtual HType CalculateInferredType() const { return HType::Tagged(); }
- // TODO(3190496): This needs a cleanup. We don't want the arguments
- // be operands of the call instruction. This results in bad code quality.
- virtual int argument_count() const { return arguments_.length(); }
- virtual int OperandCount() const { return argument_count(); }
- virtual HValue* OperandAt(int index) const { return arguments_[index]; }
- virtual HPushArgument* PushArgumentAt(int index) const {
- return HPushArgument::cast(OperandAt(index));
- }
- virtual HValue* ArgumentAt(int index) const {
- return PushArgumentAt(index)->argument();
- }
- virtual void SetArgumentAt(int index, HPushArgument* push_argument);
+ virtual int argument_count() const { return argument_count_; }
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(Call)
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- arguments_[index] = value;
+ private:
+ int argument_count_;
+};
+
+
+class HUnaryCall: public HCall {
+ public:
+ HUnaryCall(HValue* value, int argument_count)
+ : HCall(argument_count), value_(NULL) {
+ SetOperandAt(0, value);
}
- int argument_count_;
- Vector<HValue*> arguments_;
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ HValue* value() const { return value_; }
+
+ virtual int OperandCount() const { return 1; }
+ virtual HValue* OperandAt(int index) const {
+ ASSERT(index == 0);
+ return value_;
+ }
+
+ DECLARE_INSTRUCTION(UnaryCall)
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value) {
+ ASSERT(index == 0);
+ value_ = value;
+ }
+
+ private:
+ HValue* value_;
+};
+
+
+class HBinaryCall: public HCall {
+ public:
+ HBinaryCall(HValue* first, HValue* second, int argument_count)
+ : HCall(argument_count) {
+ SetOperandAt(0, first);
+ SetOperandAt(1, second);
+ }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ HValue* first() const { return operands_[0]; }
+ HValue* second() const { return operands_[1]; }
+
+ virtual int OperandCount() const { return 2; }
+ virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+ DECLARE_INSTRUCTION(BinaryCall)
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value) {
+ operands_[index] = value;
+ }
+
+ private:
+ HOperandVector<2> operands_;
};
@@ -1222,6 +1201,7 @@
: HCall(argument_count), function_(function) { }
Handle<JSFunction> function() const { return function_; }
+
bool IsApplyFunction() const {
return function_->code() == Builtins::builtin(Builtins::FunctionApply);
}
@@ -1235,42 +1215,32 @@
};
-class HCallKeyed: public HCall {
+class HCallKeyed: public HBinaryCall {
public:
- HCallKeyed(HValue* key, int argument_count)
- : HCall(argument_count + 1) {
- SetOperandAt(0, key);
+ HCallKeyed(HValue* context, HValue* key, int argument_count)
+ : HBinaryCall(context, key, argument_count) {
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- // TODO(3190496): This is a hack to get an additional operand that
- // is not an argument to work with the current setup. This _needs_ a cleanup.
- // (see HCall)
- virtual void PrintDataTo(StringStream* stream) const;
- HValue* key() const { return OperandAt(0); }
- virtual int argument_count() const { return arguments_.length() - 1; }
- virtual int OperandCount() const { return arguments_.length(); }
- virtual HValue* OperandAt(int index) const { return arguments_[index]; }
- virtual HPushArgument* PushArgumentAt(int index) const {
- return HPushArgument::cast(OperandAt(index + 1));
- }
- virtual void SetArgumentAt(int index, HPushArgument* push_argument) {
- HCall::SetArgumentAt(index + 1, push_argument);
- }
+ HValue* context() const { return first(); }
+ HValue* key() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
};
-class HCallNamed: public HCall {
+class HCallNamed: public HUnaryCall {
public:
- HCallNamed(Handle<String> name, int argument_count)
- : HCall(argument_count), name_(name) { }
+ HCallNamed(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
virtual void PrintDataTo(StringStream* stream) const;
+ HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
@@ -1280,21 +1250,27 @@
};
-class HCallFunction: public HCall {
+class HCallFunction: public HUnaryCall {
public:
- explicit HCallFunction(int argument_count) : HCall(argument_count) { }
+ HCallFunction(HValue* context, int argument_count)
+ : HUnaryCall(context, argument_count) {
+ }
+
+ HValue* context() const { return value(); }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
};
-class HCallGlobal: public HCall {
+class HCallGlobal: public HUnaryCall {
public:
- HCallGlobal(Handle<String> name, int argument_count)
- : HCall(argument_count), name_(name) { }
+ HCallGlobal(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
virtual void PrintDataTo(StringStream* stream) const;
+ HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
@@ -1306,10 +1282,11 @@
class HCallKnownGlobal: public HCall {
public:
- HCallKnownGlobal(Handle<JSFunction> target,
- int argument_count)
+ HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
: HCall(argument_count), target_(target) { }
+ virtual void PrintDataTo(StringStream* stream) const;
+
Handle<JSFunction> target() const { return target_; }
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
@@ -1319,15 +1296,18 @@
};
-class HCallNew: public HCall {
+class HCallNew: public HBinaryCall {
public:
- explicit HCallNew(int argument_count) : HCall(argument_count) { }
+ HCallNew(HValue* context, HValue* constructor, int argument_count)
+ : HBinaryCall(context, constructor, argument_count) {
+ }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- HValue* constructor() const { return ArgumentAt(0); }
+ HValue* context() const { return first(); }
+ HValue* constructor() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
};
@@ -1368,6 +1348,9 @@
}
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1384,6 +1367,30 @@
}
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HPixelArrayLength: public HUnaryOperation {
+ public:
+ explicit HPixelArrayLength(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ // The result of this instruction is idempotent as long as its inputs don't
+ // change. The length of a pixel array cannot change once set, so it's not
+ // necessary to introduce a kDependsOnArrayLengths or any other dependency.
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1401,6 +1408,9 @@
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1496,6 +1506,33 @@
}
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HLoadPixelArrayExternalPointer: public HUnaryOperation {
+ public:
+ explicit HLoadPixelArrayExternalPointer(HValue* value)
+ : HUnaryOperation(value) {
+ set_representation(Representation::External());
+ // The result of this instruction is idempotent as long as its inputs don't
+ // change. The external array of a pixel array elements object cannot
+ // change once set, so it's not necessary to introduce any additional
+ // dependencies on top of the inputs.
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+ "load-pixel-array-external-pointer")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1517,7 +1554,7 @@
virtual HType CalculateInferredType() const;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
Handle<Map> map() const { return map_; }
@@ -1552,7 +1589,7 @@
virtual HType CalculateInferredType() const;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
Handle<JSFunction> target() const { return target_; }
@@ -1579,6 +1616,12 @@
ASSERT(first <= last);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
+ if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
+ (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
+ // A particular string instance type can change because of GC or
+ // externalization, but the value still remains a string.
+ SetFlag(kDependsOnMaps);
+ }
}
virtual bool IsCheckInstruction() const { return true; }
@@ -1588,7 +1631,7 @@
}
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
@@ -1629,10 +1672,13 @@
virtual HType CalculateInferredType() const;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1647,7 +1693,7 @@
virtual bool IsCheckInstruction() const { return true; }
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
Handle<JSObject> prototype() const { return prototype_; }
@@ -1690,10 +1736,13 @@
virtual HType CalculateInferredType() const;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1737,7 +1786,7 @@
HValue* GetRedundantReplacement() const;
void AddInput(HValue* value);
- bool HasReceiverOperand();
+ bool IsReceiver() { return merged_index_ == 0; }
int merged_index() const { return merged_index_; }
@@ -1746,7 +1795,7 @@
virtual void PrintTo(StringStream* stream) const;
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
DECLARE_INSTRUCTION(Phi)
@@ -1834,7 +1883,7 @@
}
#ifdef DEBUG
- virtual void Verify() const { }
+ virtual void Verify() { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
@@ -1896,7 +1945,6 @@
operands_[index] = value;
}
- private:
HOperandVector<2> operands_;
};
@@ -1912,6 +1960,7 @@
SetOperandAt(1, receiver);
SetOperandAt(2, length);
SetOperandAt(3, elements);
+ SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {
@@ -1931,8 +1980,6 @@
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
-
-
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
operands_[index] = value;
@@ -1953,6 +2000,9 @@
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -1964,6 +2014,9 @@
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2000,6 +2053,8 @@
operands_[index] = value;
}
+ virtual bool DataEquals(HValue* other) const { return true; }
+
private:
HOperandVector<3> operands_;
};
@@ -2019,13 +2074,16 @@
}
#ifdef DEBUG
- virtual void Verify() const;
+ virtual void Verify();
#endif
HValue* index() const { return left(); }
HValue* length() const { return right(); }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2033,16 +2091,26 @@
public:
HBitwiseBinaryOperation(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
- // Default to truncating, Integer32, UseGVN.
- set_representation(Representation::Integer32());
- SetFlag(kTruncatingToInt32);
- SetFlag(kUseGVN);
+ set_representation(Representation::Tagged());
+ SetFlag(kFlexibleRepresentation);
+ SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
+ return representation();
}
+ virtual void RepresentationChanged(Representation to) {
+ if (!to.IsTagged()) {
+ ASSERT(to.IsInteger32());
+ ClearAllSideEffects();
+ SetFlag(kTruncatingToInt32);
+ SetFlag(kUseGVN);
+ }
+ }
+
+ HType CalculateInferredType() const;
+
DECLARE_INSTRUCTION(BitwiseBinaryOperation)
};
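Dropping the side effects and specializing to Integer32 is sound because JavaScript fixes the semantics of bitwise operators: both inputs pass through ToInt32, which truncates toward zero and wraps modulo 2^32, exactly what the kTruncatingToInt32 flag records. A reference sketch of that conversion:

    #include <cassert>
    #include <cmath>
    #include <stdint.h>

    int32_t ToInt32(double d) {
      if (d != d || d == HUGE_VAL || d == -HUGE_VAL) return 0;  // NaN, +/-Inf
      double t = (d < 0) ? std::ceil(d) : std::floor(d);  // truncate toward zero
      double m = std::fmod(t, 4294967296.0);              // wrap modulo 2^32
      if (m < 0) m += 4294967296.0;
      return (int32_t)(uint32_t)m;                        // reinterpret as signed
    }

    int main() {
      assert(ToInt32(4294967296.0 + 5.0) == 5);        // 2^32 + 5 wraps to 5
      assert(ToInt32(-1.5) == -1);
      assert(ToInt32(2147483648.0) == -2147483648LL);  // 2^31 flips sign
      return 0;
    }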
@@ -2053,12 +2121,12 @@
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
virtual void RepresentationChanged(Representation to) {
if (!to.IsTagged()) {
- ClearFlagMask(AllSideEffects());
+ ClearAllSideEffects();
SetFlag(kUseGVN);
}
}
@@ -2084,11 +2152,15 @@
: HBinaryOperation(left, right), token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
void SetInputRepresentation(Representation r);
- virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+
+ virtual bool EmitAtUses() const {
+ return !HasSideEffects() && (uses()->length() <= 1);
+ }
+
virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
}
@@ -2126,13 +2198,19 @@
SetFlag(kUseGVN);
}
- virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+ virtual bool EmitAtUses() const {
+ return !HasSideEffects() && (uses()->length() <= 1);
+ }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2142,7 +2220,11 @@
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+
+ virtual bool EmitAtUses() const {
+ return !HasSideEffects() && (uses()->length() <= 1);
+ }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2175,6 +2257,9 @@
explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2183,6 +2268,27 @@
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HIsConstructCall: public HInstruction {
+ public:
+ HIsConstructCall() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool EmitAtUses() const {
+ return !HasSideEffects() && (uses()->length() <= 1);
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2219,6 +2325,9 @@
explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2265,20 +2374,42 @@
};
-class HInstanceOf: public HBinaryOperation {
+class HInstanceOf: public HInstruction {
public:
- HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+ HInstanceOf(HValue* context, HValue* left, HValue* right) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
- virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+ HValue* context() const { return operands_[0]; }
+ HValue* left() const { return operands_[1]; }
+ HValue* right() const { return operands_[2]; }
+
+ virtual bool EmitAtUses() const {
+ return !HasSideEffects() && (uses()->length() <= 1);
+ }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ virtual int OperandCount() const { return 3; }
+ virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value) {
+ operands_[index] = value;
+ }
+
+ private:
+ HOperandVector<3> operands_;
};
@@ -2287,7 +2418,7 @@
HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
: HUnaryOperation(left), function_(right) {
set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
Handle<JSFunction> function() { return function_; }
@@ -2317,6 +2448,9 @@
}
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2339,6 +2473,8 @@
DECLARE_CONCRETE_INSTRUCTION(Add, "add")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2354,6 +2490,8 @@
DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2374,6 +2512,8 @@
DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2389,6 +2529,8 @@
DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2405,6 +2547,8 @@
DECLARE_CONCRETE_INSTRUCTION(Div, "div")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2420,6 +2564,8 @@
DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2433,6 +2579,9 @@
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2447,6 +2596,8 @@
DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
virtual Range* InferRange();
};
@@ -2460,6 +2611,9 @@
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2471,6 +2625,9 @@
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2483,6 +2640,9 @@
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
@@ -2518,18 +2678,17 @@
};
-class HCallStub: public HInstruction {
+class HCallStub: public HUnaryCall {
public:
- HCallStub(CodeStub::Major major_key, int argument_count)
- : major_key_(major_key),
- argument_count_(argument_count),
+ HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+ : HUnaryCall(context, argument_count),
+ major_key_(major_key),
transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
}
CodeStub::Major major_key() { return major_key_; }
- int argument_count() { return argument_count_; }
+
+ HValue* context() const { return value(); }
void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
transcendental_type_ = transcendental_type;
@@ -2537,13 +2696,13 @@
TranscendentalCache::Type transcendental_type() {
return transcendental_type_;
}
+
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
private:
CodeStub::Major major_key_;
- int argument_count_;
TranscendentalCache::Type transcendental_type_;
};
@@ -2594,12 +2753,17 @@
class HStoreGlobal: public HUnaryOperation {
public:
- HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
- : HUnaryOperation(value), cell_(cell) {
+ HStoreGlobal(HValue* value,
+ Handle<JSGlobalPropertyCell> cell,
+ bool check_hole_value)
+ : HUnaryOperation(value),
+ cell_(cell),
+ check_hole_value_(check_hole_value) {
SetFlag(kChangesGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ bool check_hole_value() const { return check_hole_value_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -2608,46 +2772,72 @@
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
- protected:
- virtual bool DataEquals(HValue* other) const {
- HStoreGlobal* b = HStoreGlobal::cast(other);
- return cell_.is_identical_to(b->cell());
- }
-
private:
Handle<JSGlobalPropertyCell> cell_;
+ bool check_hole_value_;
};
-class HLoadContextSlot: public HInstruction {
+class HLoadContextSlot: public HUnaryOperation {
public:
- HLoadContextSlot(int context_chain_length , int slot_index)
- : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+ HLoadContextSlot(HValue* context, int slot_index)
+ : HUnaryOperation(context), slot_index_(slot_index) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
+ SetFlag(kDependsOnContextSlots);
}
- int context_chain_length() const { return context_chain_length_; }
int slot_index() const { return slot_index_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- virtual intptr_t Hashcode() const {
- return context_chain_length() * 29 + slot_index();
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) const;
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
protected:
virtual bool DataEquals(HValue* other) const {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
- return (context_chain_length() == b->context_chain_length())
- && (slot_index() == b->slot_index());
+ return (slot_index() == b->slot_index());
}
private:
- int context_chain_length_;
+ int slot_index_;
+};
+
+
+static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+ return !value->type().IsSmi() &&
+ !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+}
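StoringValueNeedsWriteBarrier() moves up here because the new HStoreContextSlot below reuses it. The predicate encodes when a store can skip the generational-GC write barrier: smis are not heap pointers, and a constant already in old space cannot create an old-to-new pointer. A hedged sketch of the invariant at a store site (RecordWrite is the barrier; the surrounding names are illustrative):

    // Illustrative store site: a pointer written into a heap object must be
    // recorded so a scavenge can later find old-to-new references.
    void StoreField(HeapObject* holder, int offset, Object* value) {
      WriteField(holder, offset, value);            // the store itself
      if (!value->IsSmi() && MayBeInNewSpace(value)) {
        RecordWrite(holder, offset);                // the write barrier
      }
    }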
+
+
+class HStoreContextSlot: public HBinaryOperation {
+ public:
+ HStoreContextSlot(HValue* context, int slot_index, HValue* value)
+ : HBinaryOperation(context, value), slot_index_(slot_index) {
+ SetFlag(kChangesContextSlots);
+ }
+
+ HValue* context() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+ int slot_index() const { return slot_index_; }
+
+ bool NeedsWriteBarrier() const {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+
+ private:
int slot_index_;
};
@@ -2690,15 +2880,16 @@
};
-class HLoadNamedGeneric: public HUnaryOperation {
+class HLoadNamedGeneric: public HBinaryOperation {
public:
- HLoadNamedGeneric(HValue* object, Handle<Object> name)
- : HUnaryOperation(object), name_(name) {
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
+ : HBinaryOperation(context, object), name_(name) {
set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
- HValue* object() const { return OperandAt(0); }
+ HValue* context() const { return OperandAt(0); }
+ HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
@@ -2707,12 +2898,6 @@
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
- protected:
- virtual bool DataEquals(HValue* other) const {
- HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
- return name_.is_identical_to(b->name_);
- }
-
private:
Handle<Object> name_;
};
@@ -2723,7 +2908,8 @@
explicit HLoadFunctionPrototype(HValue* function)
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
- SetFlagMask(kDependsOnFunctionPrototypes);
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnCalls);
}
HValue* function() const { return OperandAt(0); }
@@ -2772,28 +2958,73 @@
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
"load_keyed_fast_element")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
+class HLoadPixelArrayElement: public HBinaryOperation {
+ public:
+ HLoadPixelArrayElement(HValue* external_elements, HValue* key)
+ : HBinaryOperation(external_elements, key) {
+ set_representation(Representation::Integer32());
+ SetFlag(kDependsOnPixelArrayElements);
+ // Native code could change the pixel array.
+ SetFlag(kDependsOnCalls);
+ SetFlag(kUseGVN);
+ }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ // The key is supposed to be Integer32, but the base pointer
+ // for the element load is a naked pointer.
+ return (index == 1) ? Representation::Integer32()
+ : Representation::External();
+ }
+
+ HValue* external_pointer() const { return OperandAt(0); }
+ HValue* key() const { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+ "load_pixel_array_element")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
};
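HLoadPixelArrayElement is the first instruction here whose base operand is a naked external pointer rather than a tagged object, hence the Representation::External() requirement on operand 0. Its runtime effect, as a standalone sketch: pixel arrays store bytes, so the load is a zero-extended byte read.

    #include <stdint.h>

    int32_t LoadPixelArrayElement(const uint8_t* external_pointer, int32_t key) {
      // A preceding HBoundsCheck guarantees 0 <= key < array length.
      return static_cast<int32_t>(external_pointer[key]);  // zero-extends
    }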
class HLoadKeyedGeneric: public HLoadKeyed {
public:
- HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
- SetFlagMask(AllSideEffects());
+ HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key)
+ : HLoadKeyed(obj, key), context_(NULL) {
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
+
+ HValue* context() const { return context_; }
+ HValue* object() const { return operands_[0]; }
+ HValue* key() const { return operands_[1]; }
+
+ virtual int OperandCount() const { return 3; }
+ virtual HValue* OperandAt(int index) const {
+ return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value);
+
+ private:
+ HValue* context_;
};
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsSmi() &&
- !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
class HStoreNamed: public HBinaryOperation {
public:
- HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
+ HStoreNamed(HValue* obj, Handle<String> name, HValue* val)
: HBinaryOperation(obj, val), name_(name) {
}
@@ -2804,31 +3035,21 @@
virtual void PrintDataTo(StringStream* stream) const;
HValue* object() const { return OperandAt(0); }
- Handle<Object> name() const { return name_; }
+ Handle<String> name() const { return name_; }
HValue* value() const { return OperandAt(1); }
void set_value(HValue* value) { SetOperandAt(1, value); }
- bool NeedsWriteBarrier() const {
- return StoringValueNeedsWriteBarrier(value());
- }
-
DECLARE_INSTRUCTION(StoreNamed)
- protected:
- virtual bool DataEquals(HValue* other) const {
- HStoreNamed* b = HStoreNamed::cast(other);
- return name_.is_identical_to(b->name_);
- }
-
private:
- Handle<Object> name_;
+ Handle<String> name_;
};
class HStoreNamedField: public HStoreNamed {
public:
HStoreNamedField(HValue* obj,
- Handle<Object> name,
+ Handle<String> name,
HValue* val,
bool in_object,
int offset)
@@ -2845,7 +3066,7 @@
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
+ return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
@@ -2854,6 +3075,10 @@
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
+ bool NeedsWriteBarrier() const {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+
private:
bool is_in_object_;
int offset_;
@@ -2863,12 +3088,32 @@
class HStoreNamedGeneric: public HStoreNamed {
public:
- HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
- : HStoreNamed(obj, name, val) {
- SetFlagMask(AllSideEffects());
+ HStoreNamedGeneric(HValue* context,
+ HValue* object,
+ Handle<String> name,
+ HValue* value)
+ : HStoreNamed(object, name, value), context_(NULL) {
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
+
+ HValue* context() const { return context_; }
+ HValue* object() const { return operands_[0]; }
+ HValue* value() const { return operands_[1]; }
+
+ virtual int OperandCount() const { return 3; }
+
+ virtual HValue* OperandAt(int index) const {
+ return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value);
+
+ private:
+ HValue* context_;
};
@@ -2903,7 +3148,6 @@
operands_[index] = value;
}
- private:
HOperandVector<3> operands_;
};
@@ -2928,12 +3172,88 @@
class HStoreKeyedGeneric: public HStoreKeyed {
public:
- HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
- : HStoreKeyed(obj, key, val) {
- SetFlagMask(AllSideEffects());
+ HStoreKeyedGeneric(HValue* context,
+ HValue* object,
+ HValue* key,
+ HValue* value)
+ : HStoreKeyed(object, key, value), context_(NULL) {
+ SetOperandAt(3, context);
+ SetAllSideEffects();
+ }
+
+ HValue* context() const { return context_; }
+ HValue* object() const { return operands_[0]; }
+ HValue* key() const { return operands_[1]; }
+ HValue* value() const { return operands_[2]; }
+
+ virtual int OperandCount() const { return 4; }
+
+ virtual HValue* OperandAt(int index) const {
+ return (index < 3) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value);
+
+ private:
+ HValue* context_;
+};
+
+
+class HStringCharCodeAt: public HBinaryOperation {
+ public:
+ HStringCharCodeAt(HValue* string, HValue* index)
+ : HBinaryOperation(string, index) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ // The index is supposed to be Integer32.
+ return (index == 1) ? Representation::Integer32()
+ : Representation::Tagged();
+ }
+
+ HValue* string() const { return OperandAt(0); }
+ HValue* index() const { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
+ virtual Range* InferRange() {
+ return new Range(0, String::kMaxUC16CharCode);
+ }
+};
+
+
+class HStringLength: public HUnaryOperation {
+ public:
+ explicit HStringLength(HValue* string) : HUnaryOperation(string) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() const {
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ return HType::Smi();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+
+ virtual Range* InferRange() {
+ return new Range(0, String::kMaxLength);
+ }
};
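The InferRange() overrides on HStringCharCodeAt and HStringLength feed the range analysis: a char code is confined to [0, String::kMaxUC16CharCode] and a length to [0, String::kMaxLength], and the STATIC_ASSERT justifies typing the length as a Smi. A toy illustration of what a later pass can do with such ranges (sketch only, not the HRangeAnalysis code):

    struct Range { int lower, upper; };
    enum Fold { kAlwaysTrue, kAlwaysFalse, kUnknown };

    // A comparison against a constant folds when the range decides it,
    // e.g. char_code < 0x10000 is provably always true.
    Fold FoldLessThan(Range r, int constant) {
      if (r.upper < constant) return kAlwaysTrue;
      if (r.lower >= constant) return kAlwaysFalse;
      return kUnknown;
    }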
@@ -2980,22 +3300,36 @@
class HObjectLiteral: public HMaterializedLiteral {
public:
- HObjectLiteral(Handle<FixedArray> constant_properties,
+ HObjectLiteral(HValue* context,
+ Handle<FixedArray> constant_properties,
bool fast_elements,
int literal_index,
int depth)
: HMaterializedLiteral(literal_index, depth),
+ context_(NULL),
constant_properties_(constant_properties),
- fast_elements_(fast_elements) {}
+ fast_elements_(fast_elements) {
+ SetOperandAt(0, context);
+ }
+ HValue* context() const { return context_; }
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
bool fast_elements() const { return fast_elements_; }
+ virtual int OperandCount() const { return 1; }
+ virtual HValue* OperandAt(int index) const { return context_; }
+
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+ protected:
+ virtual void InternalSetOperandAt(int index, HValue* value) {
+ context_ = value;
+ }
+
private:
+ HValue* context_;
Handle<FixedArray> constant_properties_;
bool fast_elements_;
};
@@ -3045,6 +3379,10 @@
set_representation(Representation::Tagged());
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -3064,7 +3402,7 @@
HDeleteProperty(HValue* obj, HValue* key)
: HBinaryOperation(obj, key) {
set_representation(Representation::Tagged());
- SetFlagMask(AllSideEffects());
+ SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index da41ef9..9be3176 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -34,6 +34,7 @@
#include "lithium-allocator.h"
#include "parser.h"
#include "scopes.h"
+#include "stub-cache.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
@@ -64,6 +65,7 @@
first_instruction_index_(-1),
last_instruction_index_(-1),
deleted_phis_(4),
+ parent_loop_header_(NULL),
is_inline_return_target_(false) {
}
@@ -292,20 +294,6 @@
// Check that every block is finished.
ASSERT(IsFinished());
ASSERT(block_id() >= 0);
-
- // Verify that all blocks targetting a branch target, have the same boolean
- // value on top of their expression stack.
- if (!cond().is_null()) {
- ASSERT(predecessors()->length() > 0);
- for (int i = 1; i < predecessors()->length(); i++) {
- HBasicBlock* pred = predecessors()->at(i);
- HValue* top = pred->last_environment()->Top();
- ASSERT(top->IsConstant());
- Object* a = *HConstant::cast(top)->handle();
- Object* b = *cond();
- ASSERT(a == b);
- }
- }
}
#endif
@@ -504,19 +492,15 @@
void HSubgraph::AppendOptional(HSubgraph* graph,
bool on_true_branch,
- HValue* boolean_value) {
+ HValue* value) {
ASSERT(HasExit() && graph->HasExit());
HBasicBlock* other_block = graph_->CreateBasicBlock();
HBasicBlock* join_block = graph_->CreateBasicBlock();
- HBasicBlock* true_branch = other_block;
- HBasicBlock* false_branch = graph->entry_block();
- if (on_true_branch) {
- true_branch = graph->entry_block();
- false_branch = other_block;
- }
-
- exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value));
+ HTest* test = on_true_branch
+ ? new HTest(value, graph->entry_block(), other_block)
+ : new HTest(value, other_block, graph->entry_block());
+ exit_block_->Finish(test);
other_block->Goto(join_block);
graph->exit_block()->Goto(join_block);
exit_block_ = join_block;
@@ -687,7 +671,7 @@
}
-bool HGraph::AllowAggressiveOptimizations() const {
+bool HGraph::AllowCodeMotion() const {
return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
}
@@ -873,13 +857,11 @@
}
uses_to_replace.Rewind(0);
block->RemovePhi(phi);
- } else if (phi->HasNoUses() &&
- !phi->HasReceiverOperand() &&
- FLAG_eliminate_dead_phis) {
- // We can't eliminate phis that have the receiver as an operand
- // because in case of throwing an error we need the correct
- // receiver value in the environment to construct a corrent
- // stack trace.
+ } else if (FLAG_eliminate_dead_phis && phi->HasNoUses() &&
+ !phi->IsReceiver()) {
+ // We can't eliminate phis in the receiver position in the environment
+ // because, if an error is thrown, we need this value to construct a
+ // stack trace.
block->RemovePhi(phi);
block->RecordDeletedPhi(phi->merged_index());
}
@@ -934,7 +916,7 @@
private:
void TraceRange(const char* msg, ...);
void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HBranch* branch, HBasicBlock* dest);
+ void InferControlFlowRange(HTest* test, HBasicBlock* dest);
void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferPhiRange(HPhi* phi);
void InferRange(HValue* value);
@@ -970,8 +952,8 @@
// Infer range based on control flow.
if (block->predecessors()->length() == 1) {
HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsBranch()) {
- InferControlFlowRange(HBranch::cast(pred->end()), block);
+ if (pred->end()->IsTest()) {
+ InferControlFlowRange(HTest::cast(pred->end()), block);
}
}
@@ -997,14 +979,12 @@
}
-void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) {
- ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest);
- ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest);
-
- if (branch->value()->IsCompare()) {
- HCompare* compare = HCompare::cast(branch->value());
+void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+ ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+ if (test->value()->IsCompare()) {
+ HCompare* compare = HCompare::cast(test->value());
Token::Value op = compare->token();
- if (branch->SecondSuccessor() == dest) {
+ if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
}
Token::Value inverted_op = Token::InvertCompareOp(op);
@@ -1451,19 +1431,23 @@
}
}
-// Only move instructions that postdominate the loop header (i.e. are
-// always executed inside the loop). This is to avoid unnecessary
-// deoptimizations assuming the loop is executed at least once.
-// TODO(fschneider): Better type feedback should give us information
-// about code that was never executed.
+
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
- if (FLAG_aggressive_loop_invariant_motion &&
- !instr->IsChange() &&
- (!instr->IsCheckInstruction() ||
- graph_->AllowAggressiveOptimizations())) {
+ // If we've disabled code motion, don't move any instructions.
+ if (!graph_->AllowCodeMotion()) return false;
+
+ // If --aggressive-loop-invariant-motion is set, move everything except
+ // change instructions.
+ if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
return true;
}
+
+ // Otherwise only move instructions that postdominate the loop header
+ // (i.e. are always executed inside the loop). This is to avoid
+ // unnecessary deoptimizations assuming the loop is executed at least
+ // once. TODO(fschneider): Better type feedback should give us
+ // information about code that was never executed.
HBasicBlock* block = instr->block();
bool result = true;
if (block != loop_header) {
@@ -1814,28 +1798,17 @@
HValue* use,
Representation to,
bool is_truncating) {
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = value->representation();
- if (from.IsInteger32()) {
- ASSERT(to.IsTagged() || to.IsDouble());
- BitVector visited(GetMaximumValueID());
- PropagateMinusZeroChecks(value, &visited);
- }
-
// Insert the representation change right before its use. For phi-uses we
// insert at the end of the corresponding predecessor.
- HBasicBlock* insert_block = use->block();
+ HInstruction* next = NULL;
if (use->IsPhi()) {
int index = 0;
while (use->OperandAt(index) != value) ++index;
- insert_block = insert_block->predecessors()->at(index);
+ next = use->block()->predecessors()->at(index)->end();
+ } else {
+ next = HInstruction::cast(use);
}
- HInstruction* next = (insert_block == use->block())
- ? HInstruction::cast(use)
- : insert_block->end();
-
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@@ -1985,6 +1958,30 @@
}
+void HGraph::ComputeMinusZeroChecks() {
+ BitVector visited(GetMaximumValueID());
+ for (int i = 0; i < blocks_.length(); ++i) {
+ for (HInstruction* current = blocks_[i]->first();
+ current != NULL;
+ current = current->next()) {
+ if (current->IsChange()) {
+ HChange* change = HChange::cast(current);
+ // Propagate flags for negative zero checks upwards from conversions
+ // int32-to-tagged and int32-to-double.
+ Representation from = change->value()->representation();
+ ASSERT(from.Equals(change->from()));
+ if (from.IsInteger32()) {
+ ASSERT(change->to().IsTagged() || change->to().IsDouble());
+ ASSERT(visited.IsEmpty());
+ PropagateMinusZeroChecks(change->value(), &visited);
+ visited.Clear();
+ }
+ }
+ }
+ }
+}
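ComputeMinusZeroChecks() is the minus-zero propagation that used to run inline in InsertRepresentationChange(); it is now a separate pass over all HChange instructions. The int32-to-tagged/double restriction is the crux: int32 cannot represent -0, so the sign is lost exactly at a narrowing and must be guarded where the value is widened back. A standalone illustration:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      double d = -0.0;
      int32_t i = static_cast<int32_t>(d);   // i == 0; the sign bit is gone
      double back = static_cast<double>(i);  // +0.0, not -0.0
      std::printf("%d %d\n", std::signbit(d) ? 1 : 0, std::signbit(back) ? 1 : 0);
      return 0;  // prints "1 0": widening cannot restore -0, hence the checks
    }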
+
+
// Implementation of utility classes to represent an expression's context in
// the AST.
AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
@@ -2067,8 +2064,8 @@
HGraphBuilder* builder = owner();
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HBranch* branch = new HBranch(empty_true, empty_false, value);
- builder->CurrentBlock()->Finish(branch);
+ HTest* test = new HTest(value, empty_true, empty_false);
+ builder->CurrentBlock()->Finish(test);
HValue* const no_return_value = NULL;
HBasicBlock* true_target = if_true();
@@ -2183,10 +2180,8 @@
}
-HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+void HGraphBuilder::VisitArgument(Expression* expr) {
VisitForValue(expr);
- if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
- return environment()->Top();
}
@@ -2244,6 +2239,7 @@
graph_->InitializeInferredTypes();
graph_->Canonicalize();
graph_->InsertRepresentationChanges();
+ graph_->ComputeMinusZeroChecks();
// Eliminate redundant stack checks on backwards branches.
HStackCheckEliminator sce(graph_);
@@ -2304,29 +2300,15 @@
}
-void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
- const int kMaxStubArguments = 4;
- ASSERT_GE(kMaxStubArguments, argument_count);
- // Push the arguments on the stack.
- HValue* arguments[kMaxStubArguments];
- for (int i = argument_count - 1; i >= 0; i--) {
- arguments[i] = Pop();
- }
- for (int i = 0; i < argument_count; i++) {
- AddInstruction(new HPushArgument(arguments[i]));
- }
-}
-
-
-void HGraphBuilder::ProcessCall(HCall* call) {
- for (int i = call->argument_count() - 1; i >= 0; --i) {
- HValue* value = Pop();
- HPushArgument* push = new HPushArgument(value);
- call->SetArgumentAt(i, push);
+void HGraphBuilder::PreProcessCall(HCall* call) {
+ int count = call->argument_count();
+ ZoneList<HValue*> arguments(count);
+ for (int i = 0; i < count; ++i) {
+ arguments.Add(Pop());
}
- for (int i = 0; i < call->argument_count(); ++i) {
- AddInstruction(call->PushArgumentAt(i));
+ while (!arguments.is_empty()) {
+ AddInstruction(new HPushArgument(arguments.RemoveLast()));
}
}
@@ -2596,9 +2578,9 @@
prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
} else {
HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HBranch(empty,
- subgraph->entry_block(),
- prev_compare_inst));
+ prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+ empty,
+ subgraph->entry_block()));
}
// Build instructions for current subgraph.
@@ -2617,9 +2599,9 @@
if (prev_graph != current_subgraph_) {
last_false_block = graph()->CreateBasicBlock();
HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HBranch(empty,
- last_false_block,
- prev_compare_inst));
+ prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+ empty,
+ last_false_block));
}
// If we have a non-smi compare clause, we deoptimize after trying
@@ -2702,8 +2684,8 @@
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
- HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value);
- exit_block()->Finish(branch);
+ HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
+ exit_block()->Finish(test);
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
non_osr_entry->Goto(loop_predecessor);
@@ -2937,6 +2919,22 @@
if (is_store && lookup->IsReadOnly()) {
BAILOUT("read-only global variable");
}
+ if (lookup->holder() != *global) {
+ BAILOUT("global property on prototype of global object");
+ }
+}
+
+
+HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+ ASSERT(var->IsContextSlot());
+ HInstruction* context = new HContext;
+ AddInstruction(context);
+ int length = graph()->info()->scope()->ContextChainLength(var->scope());
+ while (length-- > 0) {
+ context = new HOuterContext(context);
+ AddInstruction(context);
+ }
+ return context;
}
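BuildContextChainWalk() replaces the old context_chain_length encoding on HLoadContextSlot: instead of one instruction knowing how many links to follow, the builder now emits an explicit HOuterContext hop per level, which GVN can then share across loads and stores. The runtime equivalent, with a hypothetical Context type:

    struct Context { Context* previous; };

    Context* WalkContextChain(Context* current, int length) {
      while (length-- > 0) {
        current = current->previous;  // one HOuterContext per link
      }
      return current;
    }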
@@ -2953,16 +2951,9 @@
if (variable->mode() == Variable::CONST) {
BAILOUT("reference to const context slot");
}
- Slot* slot = variable->AsSlot();
- CompilationInfo* info = graph()->info();
- int context_chain_length = info->function()->scope()->
- ContextChainLength(slot->var()->scope());
- ASSERT(context_chain_length >= 0);
- // TODO(antonm): if slot's value is not modified by closures, instead
- // of reading it out of context, we could just embed the value as
- // a constant.
- HLoadContextSlot* instr =
- new HLoadContextSlot(context_chain_length, slot->index());
+ HValue* context = BuildContextChainWalk(variable);
+ int index = variable->AsSlot()->index();
+ HLoadContextSlot* instr = new HLoadContextSlot(context, index);
ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
@@ -3000,7 +2991,10 @@
void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- HObjectLiteral* literal = (new HObjectLiteral(expr->constant_properties(),
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HObjectLiteral* literal = (new HObjectLiteral(context,
+ expr->constant_properties(),
expr->fast_elements(),
expr->literal_index(),
expr->depth()));
@@ -3027,7 +3021,9 @@
VISIT_FOR_VALUE(value);
HValue* value = Pop();
Handle<String> name = Handle<String>::cast(key->handle());
- AddInstruction(new HStoreNamedGeneric(literal, name, value));
+ HStoreNamedGeneric* store =
+ new HStoreNamedGeneric(context, literal, name, value);
+ AddInstruction(store);
AddSimulate(key->id());
} else {
VISIT_FOR_EFFECT(value);
@@ -3105,11 +3101,11 @@
(i == (maps->length() - 1))
? subgraphs->last()
: map_compare_subgraphs.last();
- current_subgraph_->exit_block()->Finish(
- new HCompareMapAndBranch(receiver,
- maps->at(i),
- subgraphs->at(i)->entry_block(),
- else_subgraph->entry_block()));
+ HCompareMap* compare = new HCompareMap(receiver,
+ maps->at(i),
+ subgraphs->at(i)->entry_block(),
+ else_subgraph->entry_block());
+ current_subgraph_->exit_block()->Finish(compare);
map_compare_subgraphs.Add(subgraph);
}
@@ -3117,11 +3113,11 @@
AddInstruction(new HCheckNonSmi(receiver));
HSubgraph* else_subgraph =
(maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
- current_subgraph_->exit_block()->Finish(
- new HCompareMapAndBranch(receiver,
- Handle<Map>(maps->first()),
- subgraphs->first()->entry_block(),
- else_subgraph->entry_block()));
+ HCompareMap* compare = new HCompareMap(receiver,
+ Handle<Map>(maps->first()),
+ subgraphs->first()->entry_block(),
+ else_subgraph->entry_block());
+ current_subgraph_->exit_block()->Finish(compare);
// Join all the call subgraphs in a new basic block and make
// this basic block the current basic block.
@@ -3207,7 +3203,9 @@
HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value) {
- return new HStoreNamedGeneric(object, name, value);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ return new HStoreNamedGeneric(context, object, name, value);
}
@@ -3269,7 +3267,7 @@
// If none of the properties were named fields we generate a
// generic store.
if (maps.length() == 0) {
- HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+ HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
@@ -3283,7 +3281,7 @@
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
} else {
- HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+ HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
@@ -3333,7 +3331,7 @@
return;
} else {
- instr = new HStoreNamedGeneric(object, name, value);
+ instr = BuildStoreNamedGeneric(object, name, value);
}
} else {
@@ -3371,9 +3369,10 @@
LookupGlobalPropertyCell(var, &lookup, true);
CHECK_BAILOUT;
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(graph()->info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr = new HStoreGlobal(value, cell);
+ HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
instr->set_position(position);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id);
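check_hole captures when the optimized store cannot blindly write the property cell: a global that is deletable (not DONT_DELETE) or read-only may hold the hole marker. A sketch of the semantics the backend presumably emits for HStoreGlobal when the flag is set (Deoptimize() stands in for the bailout):

    // Hole-checked cell store (sketch of the emitted semantics):
    void StoreGlobalCell(JSGlobalPropertyCell* cell, Object* value) {
      if (cell->value()->IsTheHole()) Deoptimize();  // property was deleted
      cell->set_value(value);
    }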
@@ -3390,7 +3389,6 @@
// We have a second position recorded in the FullCodeGenerator to have
// type feedback for the binary operation.
BinaryOperation* operation = expr->binary_operation();
- operation->RecordTypeFeedback(oracle());
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
@@ -3453,7 +3451,6 @@
bool is_fast_elements = prop->IsMonomorphic() &&
prop->GetMonomorphicReceiverType()->has_fast_elements();
-
HInstruction* load = is_fast_elements
? BuildLoadKeyedFastElement(obj, key, prop)
: BuildLoadKeyedGeneric(obj, key);
@@ -3500,35 +3497,48 @@
if (proxy->IsArguments()) BAILOUT("assignment to arguments");
// Handle the assignment.
- if (var->is_global()) {
+ if (var->IsStackAllocated()) {
+ HValue* value = NULL;
+ // Handle stack-allocated variables on the right-hand side directly.
+ // We do not allow the arguments object to occur in a context where it
+ // may escape, but assignments to stack-allocated locals are
+ // permitted. Handling such assignments here bypasses the check for
+ // the arguments object in VisitVariableProxy.
+ Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
+ if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
+ value = environment()->Lookup(rhs_var);
+ } else {
+ VISIT_FOR_VALUE(expr->value());
+ value = Pop();
+ }
+ Bind(var, value);
+ ast_context()->ReturnValue(value);
+
+ } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
+ VISIT_FOR_VALUE(expr->value());
+ HValue* context = BuildContextChainWalk(var);
+ int index = var->AsSlot()->index();
+ HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+ AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ast_context()->ReturnValue(Pop());
+
+ } else if (var->is_global()) {
VISIT_FOR_VALUE(expr->value());
HandleGlobalVariableAssignment(var,
Top(),
expr->position(),
expr->AssignmentId());
- } else if (var->IsStackAllocated()) {
- // We allow reference to the arguments object only in assignemtns
- // to local variables to make sure that the arguments object does
- // not escape and is not modified.
- VariableProxy* rhs = expr->value()->AsVariableProxy();
- if (rhs != NULL &&
- rhs->var()->IsStackAllocated() &&
- environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
- Push(environment()->Lookup(rhs->var()));
- } else {
- VISIT_FOR_VALUE(expr->value());
- }
- Bind(proxy->var(), Top());
+ ast_context()->ReturnValue(Pop());
+
} else {
- BAILOUT("Assigning to no non-stack-allocated/non-global variable");
+ BAILOUT("assignment to LOOKUP or const CONTEXT variable");
}
- // Return the value.
- ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
HandlePropertyAssignment(expr);
} else {
- BAILOUT("unsupported invalid lhs");
+ BAILOUT("invalid left-hand side in assignment");
}
}
@@ -3541,9 +3551,11 @@
VISIT_FOR_VALUE(expr->exception());
HValue* value = environment()->Pop();
- HControlInstruction* instr = new HThrow(value);
+ HThrow* instr = new HThrow(value);
instr->set_position(expr->position());
- current_subgraph_->FinishExit(instr);
+ AddInstruction(instr);
+ AddSimulate(expr->id());
+ current_subgraph_->FinishExit(new HAbnormalExit);
}
@@ -3641,7 +3653,9 @@
Property* expr) {
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
- return new HLoadNamedGeneric(obj, name);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ return new HLoadNamedGeneric(context, obj, name);
}
@@ -3670,7 +3684,9 @@
HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
- return new HLoadKeyedGeneric(object, key);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ return new HLoadKeyedGeneric(context, object, key);
}
@@ -3698,10 +3714,34 @@
}
+HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object,
+ HValue* key,
+ Property* expr) {
+ ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
+ AddInstruction(new HCheckNonSmi(object));
+ Handle<Map> map = expr->GetMonomorphicReceiverType();
+ ASSERT(!map->has_fast_elements());
+ ASSERT(map->has_pixel_array_elements());
+ AddInstruction(new HCheckMap(object, map));
+ HLoadElements* elements = new HLoadElements(object);
+ AddInstruction(elements);
+ HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
+ AddInstruction(new HBoundsCheck(key, length));
+ HLoadPixelArrayExternalPointer* external_elements =
+ new HLoadPixelArrayExternalPointer(elements);
+ AddInstruction(external_elements);
+ HLoadPixelArrayElement* pixel_array_value =
+ new HLoadPixelArrayElement(external_elements, key);
+ return pixel_array_value;
+}
+
+
HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
HValue* key,
HValue* value) {
- return new HStoreKeyedGeneric(object, key, value);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ return new HStoreKeyedGeneric(context, object, key, value);
}
@@ -3771,6 +3811,14 @@
AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
instr = new HJSArrayLength(array);
+ } else if (expr->IsStringLength()) {
+ HValue* string = Pop();
+ AddInstruction(new HCheckNonSmi(string));
+ AddInstruction(new HCheckInstanceType(string,
+ FIRST_STRING_TYPE,
+ LAST_STRING_TYPE));
+ instr = new HStringLength(string);
+
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
AddInstruction(new HCheckNonSmi(function));
@@ -3797,12 +3845,20 @@
HValue* key = Pop();
HValue* obj = Pop();
- bool is_fast_elements = expr->IsMonomorphic() &&
- expr->GetMonomorphicReceiverType()->has_fast_elements();
-
- instr = is_fast_elements
- ? BuildLoadKeyedFastElement(obj, key, expr)
- : BuildLoadKeyedGeneric(obj, key);
+ if (expr->IsMonomorphic()) {
+ Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
+ // An object has either fast elements or pixel array elements, but never
+ // both. Maps for objects with pixel array elements are always created
+ // with the fast elements flag cleared.
+ if (receiver_type->has_pixel_array_elements()) {
+ instr = BuildLoadKeyedPixelArrayElement(obj, key, expr);
+ } else if (receiver_type->has_fast_elements()) {
+ instr = BuildLoadKeyedFastElement(obj, key, expr);
+ }
+ }
+ if (instr == NULL) {
+ instr = BuildLoadKeyedGeneric(obj, key);
+ }
}
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
@@ -3860,7 +3916,7 @@
CHECK_BAILOUT;
HCall* call = new HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
PushAndAdd(call);
}
subgraphs.Add(subgraph);
@@ -3872,9 +3928,11 @@
// If we couldn't compute the target for any of the maps just perform an
// IC call.
if (maps.length() == 0) {
- HCall* call = new HCallNamed(name, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
} else {
// Build subgraph for generic call through IC.
@@ -3884,9 +3942,11 @@
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
} else {
- HCall* call = new HCallNamed(name, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
PushAndAdd(call);
}
subgraphs.Add(subgraph);
@@ -3957,10 +4017,12 @@
int count_before = AstNode::Count();
// Parse and allocate variables.
- Handle<SharedFunctionInfo> shared(target->shared());
- CompilationInfo inner_info(shared);
+ CompilationInfo inner_info(target);
if (!ParserApi::Parse(&inner_info) ||
!Scope::Analyze(&inner_info)) {
+ if (Top::has_pending_exception()) {
+ SetStackOverflow();
+ }
return false;
}
FunctionLiteral* function = inner_info.function();
@@ -3981,9 +4043,10 @@
// Don't inline functions that uses the arguments object or that
// have a mismatching number of parameters.
+ Handle<SharedFunctionInfo> shared(target->shared());
int arity = expr->arguments()->length();
if (function->scope()->arguments() != NULL ||
- arity != target->shared()->formal_parameter_count()) {
+ arity != shared->formal_parameter_count()) {
return false;
}
@@ -4075,9 +4138,8 @@
// TODO(3168478): refactor to avoid this.
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HBranch* branch =
- new HBranch(empty_true, empty_false, return_value);
- body->exit_block()->Finish(branch);
+ HTest* test = new HTest(return_value, empty_true, empty_false);
+ body->exit_block()->Finish(test);
HValue* const no_return_value = NULL;
empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
@@ -4146,12 +4208,29 @@
}
-bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
+bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
+ ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
- if (!expr->target()->shared()->IsBuiltinMathFunction()) return false;
+ if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
switch (id) {
+ case kStringCharCodeAt:
+ if (argument_count == 2 && check_type == STRING_CHECK) {
+ HValue* index = Pop();
+ HValue* string = Pop();
+ ASSERT(!expr->holder().is_null());
+ AddInstruction(new HCheckPrototypeMaps(
+ oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
+ expr->holder()));
+ HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ break;
case kMathRound:
case kMathFloor:
case kMathAbs:
@@ -4159,7 +4238,8 @@
case kMathLog:
case kMathSin:
case kMathCos:
- if (argument_count == 2) {
+ if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* argument = Pop();
Drop(1); // Receiver.
HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
@@ -4169,7 +4249,8 @@
}
break;
case kMathPow:
- if (argument_count == 3) {
+ if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
@@ -4179,8 +4260,6 @@
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
result = new HUnaryMathOperation(left, kMathPowHalf);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
} else if (exponent == -0.5) {
HConstant* double_one =
new HConstant(Handle<Object>(Smi::FromInt(1)),
@@ -4193,22 +4272,18 @@
// an environment simulation here.
ASSERT(!square_root->HasSideEffects());
result = new HDiv(double_one, square_root);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
} else if (exponent == 2.0) {
result = new HMul(left, left);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
}
} else if (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HConstant::cast(right)->Integer32Value() == 2) {
+ HConstant::cast(right)->HasInteger32Value() &&
+ HConstant::cast(right)->Integer32Value() == 2) {
result = new HMul(left, left);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
}
- result = new HPower(left, right);
+ if (result == NULL) {
+ result = new HPower(left, right);
+ }
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
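The Math.pow rewrite above keeps the same strength reductions but funnels every case through a single ReturnInstruction at the end. In plain C++ the reductions are (a sketch that ignores the infinity corner cases the real code paths handle):

    #include <cmath>

    double PowSpecialized(double x, double y) {
      if (y == 0.5) return std::sqrt(x);         // kMathPowHalf
      if (y == -0.5) return 1.0 / std::sqrt(x);  // HDiv(1, sqrt(x))
      if (y == 2.0) return x * x;                // HMul(left, left)
      return std::pow(x, y);                     // generic HPower
    }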
@@ -4263,6 +4338,13 @@
}
+static bool HasCustomCallGenerator(Handle<JSFunction> function) {
+ SharedFunctionInfo* info = function->shared();
+ return info->HasBuiltinFunctionId() &&
+ CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
+}
+
+
void HGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
@@ -4285,9 +4367,11 @@
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
- call = new HCallKeyed(key, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ call = new HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
Drop(1); // Key.
ast_context()->ReturnInstruction(call, expr->id());
return;
@@ -4299,7 +4383,7 @@
if (TryCallApply(expr)) return;
CHECK_BAILOUT;
- HValue* receiver = VisitArgument(prop->obj());
+ VisitArgument(prop->obj());
CHECK_BAILOUT;
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@@ -4309,37 +4393,57 @@
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
- if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, types->first(), true);
-
- if (TryMathFunctionInline(expr)) {
+ HValue* receiver =
+ environment()->ExpressionStackAt(expr->arguments()->length());
+ if (expr->IsMonomorphic()) {
+ Handle<Map> receiver_map =
+ (types == NULL) ? Handle<Map>::null() : types->first();
+ if (TryInlineBuiltinFunction(expr,
+ receiver,
+ receiver_map,
+ expr->check_type())) {
return;
- } else if (TryInline(expr)) {
- if (subgraph()->HasExit()) {
- HValue* return_value = Pop();
- // If we inlined a function in a test context then we need to emit
- // a simulate here to shadow the ones at the end of the
- // predecessor blocks. Those environments contain the return
- // value on top and do not correspond to any actual state of the
- // unoptimized code.
- if (ast_context()->IsEffect()) AddSimulate(expr->id());
- ast_context()->ReturnValue(return_value);
- }
- return;
- } else {
- // Check for bailout, as the TryInline call in the if condition above
- // might return false due to bailout during hydrogen processing.
- CHECK_BAILOUT;
- call = new HCallConstantFunction(expr->target(), argument_count);
}
+ if (HasCustomCallGenerator(expr->target()) ||
+ expr->check_type() != RECEIVER_MAP_CHECK) {
+ // When the target has a custom call IC generator, use the IC,
+ // because it is likely to generate better code. Also use the
+ // IC when a primitive receiver check is required.
+ HContext* context = new HContext;
+ AddInstruction(context);
+ call = new HCallNamed(context, name, argument_count);
+ } else {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
+
+ if (TryInline(expr)) {
+ if (subgraph()->HasExit()) {
+ HValue* return_value = Pop();
+ // If we inlined a function in a test context then we need to emit
+ // a simulate here to shadow the ones at the end of the
+ // predecessor blocks. Those environments contain the return
+ // value on top and do not correspond to any actual state of the
+ // unoptimized code.
+ if (ast_context()->IsEffect()) AddSimulate(expr->id());
+ ast_context()->ReturnValue(return_value);
+ }
+ return;
+ } else {
+ // Check for bailout, as the TryInline call in the if condition above
+ // might return false due to bailout during hydrogen processing.
+ CHECK_BAILOUT;
+ call = new HCallConstantFunction(expr->target(), argument_count);
+ }
+ }
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
} else {
- call = new HCallNamed(name, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ call = new HCallNamed(context, name, argument_count);
}
} else {
@@ -4364,7 +4468,10 @@
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- PushAndAdd(new HGlobalObject);
+ HContext* context = new HContext;
+ HGlobalObject* global_object = new HGlobalObject(context);
+ AddInstruction(context);
+ PushAndAdd(global_object);
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@@ -4373,7 +4480,7 @@
AddInstruction(new HCheckFunction(function, expr->target()));
// Replace the global object with the global receiver.
- HGlobalReceiver* global_receiver = new HGlobalReceiver;
+ HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
AddInstruction(global_receiver);
@@ -4400,24 +4507,30 @@
call = new HCallKnownGlobal(expr->target(), argument_count);
} else {
- PushAndAdd(new HGlobalObject);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ PushAndAdd(new HGlobalObject(context));
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
- call = new HCallGlobal(var->name(), argument_count);
+ call = new HCallGlobal(context, var->name(), argument_count);
}
} else {
- PushAndAdd(new HGlobalReceiver);
+ HContext* context = new HContext;
+ HGlobalObject* global_object = new HGlobalObject(context);
+ AddInstruction(context);
+ AddInstruction(global_object);
+ PushAndAdd(new HGlobalReceiver(global_object));
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
- call = new HCallFunction(argument_count);
+ call = new HCallFunction(context, argument_count);
}
}
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4430,10 +4543,16 @@
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
- int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- HCall* call = new HCallNew(argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+
+ // The constructor is both an operand to the instruction and an argument
+ // to the construct call.
+ int arg_count = expr->arguments()->length() + 1; // Plus constructor.
+ HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
+ HCall* call = new HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
- ProcessCall(call);
+ PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4487,7 +4606,7 @@
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
HCall* call = new HCallRuntime(name, expr->function(), argument_count);
call->set_position(RelocInfo::kNoPosition);
- ProcessCall(call);
+ PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4514,12 +4633,18 @@
// The subexpression does not have side effects.
ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
- VISIT_FOR_VALUE(prop->obj());
- VISIT_FOR_VALUE(prop->key());
- HValue* key = Pop();
- HValue* obj = Pop();
- ast_context()->ReturnInstruction(new HDeleteProperty(obj, key),
- expr->id());
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ ast_context()->ReturnValue(graph()->GetConstantFalse());
+ } else {
+ VISIT_FOR_VALUE(prop->obj());
+ VISIT_FOR_VALUE(prop->key());
+ HValue* key = Pop();
+ HValue* obj = Pop();
+ HDeleteProperty* instr = new HDeleteProperty(obj, key);
+ ast_context()->ReturnInstruction(instr, expr->id());
+ }
} else if (var->is_global()) {
BAILOUT("delete with global variable");
} else {
@@ -4699,7 +4824,7 @@
HInstruction* store = is_fast_elements
? BuildStoreKeyedFastElement(obj, key, after, prop)
- : new HStoreKeyedGeneric(obj, key, after);
+ : BuildStoreKeyedGeneric(obj, key, after);
AddInstruction(store);
// Drop the key from the bailout environment. Overwrite the receiver
@@ -4720,6 +4845,18 @@
}
+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
+ HValue* index) {
+ AddInstruction(new HCheckNonSmi(string));
+ AddInstruction(new HCheckInstanceType(
+ string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+ HStringLength* length = new HStringLength(string);
+ AddInstruction(length);
+ AddInstruction(new HBoundsCheck(index, length));
+ return new HStringCharCodeAt(string, index);
+}
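BuildStringCharCodeAt() centralizes the guard sequence shared by the builtin inlining (kStringCharCodeAt above) and the %_StringCharCodeAt intrinsic. Its runtime-equivalent logic, as a sketch in which Deoptimize() stands in for the deopt edges the check instructions create:

    int32_t StringCharCodeAtEquivalent(Object* obj, int32_t index) {
      if (obj->IsSmi() || !obj->IsString()) Deoptimize();  // HCheckNonSmi + HCheckInstanceType
      String* string = String::cast(obj);
      uint32_t len = static_cast<uint32_t>(string->length());
      if (static_cast<uint32_t>(index) >= len) Deoptimize();  // HBoundsCheck
      return string->Get(index);  // in [0, String::kMaxUC16CharCode]
    }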
+
+
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right) {
@@ -4761,7 +4898,7 @@
default:
UNREACHABLE();
}
- TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
+ TypeInfo info = oracle()->BinaryType(expr);
// If we hit an uninitialized binary op stub we will get type info
// for a smi operation. If one of the operands is a constant string
// do not generate code assuming it is a smi operation.
@@ -4773,7 +4910,12 @@
if (FLAG_trace_representation) {
PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
}
- AssumeRepresentation(instr, ToRepresentation(info));
+ Representation rep = ToRepresentation(info);
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
+ rep = Representation::Integer32();
+ }
+ AssumeRepresentation(instr, rep);
return instr;
}
@@ -4854,7 +4996,8 @@
graph_->GetMaximumValueID());
}
value->ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback.
+ // The representation of the value is dictated by type feedback and
+ // will not be changed later.
value->ClearFlag(HValue::kFlexibleRepresentation);
} else if (FLAG_trace_representation) {
PrintF("No representation assumed\n");
@@ -4906,7 +5049,7 @@
HValue* left = Pop();
Token::Value op = expr->op();
- TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
+ TypeInfo info = oracle()->CompareType(expr);
HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
@@ -4938,7 +5081,9 @@
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- instr = new HInstanceOf(left, right);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ instr = new HInstanceOf(context, left, right);
} else {
AddInstruction(new HCheckFunction(right, target));
instr = new HInstanceOfKnownGlobal(left, target);
@@ -5081,9 +5226,10 @@
}
- // Support for construct call checks.
+// Support for construct call checks.
void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
- BAILOUT("inlined runtime function: IsConstructCall");
+ ASSERT(argument_count == 0);
+ ast_context()->ReturnInstruction(new HIsConstructCall, ast_id);
}
@@ -5129,7 +5275,11 @@
// Fast support for charCodeAt(n).
void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
- BAILOUT("inlined runtime function: StringCharCodeAt");
+ ASSERT(argument_count == 2);
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+ ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5143,8 +5293,11 @@
// Fast support for string.charAt(n) and string[n].
void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::StringCharAt, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::StringCharAt, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5173,8 +5326,11 @@
// Fast support for StringAdd.
void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::StringAdd, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::StringAdd, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5182,8 +5338,11 @@
// Fast support for SubString.
void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
ASSERT_EQ(3, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::SubString, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::SubString, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5191,8 +5350,11 @@
// Fast support for StringCompare.
void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::StringCompare, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::StringCompare, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5200,8 +5362,11 @@
// Support for direct calls from JavaScript to native RegExp code.
void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
ASSERT_EQ(4, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::RegExpExec, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::RegExpExec, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5210,9 +5375,11 @@
void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
int ast_id) {
ASSERT_EQ(3, argument_count);
- PushArgumentsForStubCall(argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
HCallStub* result =
- new HCallStub(CodeStub::RegExpConstructResult, argument_count);
+ new HCallStub(context, CodeStub::RegExpConstructResult, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5226,8 +5393,11 @@
// Fast support for number to string.
void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
- PushArgumentsForStubCall(argument_count);
- HCallStub* result = new HCallStub(CodeStub::NumberToString, argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
+ HCallStub* result =
+ new HCallStub(context, CodeStub::NumberToString, argument_count);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@@ -5258,30 +5428,36 @@
void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
- PushArgumentsForStubCall(argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
HCallStub* result =
- new HCallStub(CodeStub::TranscendentalCache, argument_count);
+ new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::SIN);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
- PushArgumentsForStubCall(argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
HCallStub* result =
- new HCallStub(CodeStub::TranscendentalCache, argument_count);
+ new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::COS);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
- PushArgumentsForStubCall(argument_count);
+ HContext* context = new HContext;
+ AddInstruction(context);
HCallStub* result =
- new HCallStub(CodeStub::TranscendentalCache, argument_count);
+ new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::LOG);
+ PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 19f8983..6f41ee6 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -103,16 +103,14 @@
void ClearEnvironment() { last_environment_ = NULL; }
bool HasEnvironment() const { return last_environment_ != NULL; }
void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
- HBasicBlock* parent_loop_header() const {
- if (!HasParentLoopHeader()) return NULL;
- return parent_loop_header_.get();
- }
+ HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
- parent_loop_header_.set(block);
+ ASSERT(parent_loop_header_ == NULL);
+ parent_loop_header_ = block;
}
- bool HasParentLoopHeader() const { return parent_loop_header_.is_set(); }
+ bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
void SetJoinId(int id);
@@ -136,9 +134,6 @@
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
- Handle<Object> cond() { return cond_; }
- void set_cond(Handle<Object> value) { cond_ = value; }
-
#ifdef DEBUG
void Verify();
#endif
@@ -166,9 +161,8 @@
int first_instruction_index_;
int last_instruction_index_;
ZoneList<int> deleted_phis_;
- SetOncePointer<HBasicBlock> parent_loop_header_;
+ HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
- Handle<Object> cond_;
};
@@ -297,7 +291,7 @@
CompilationInfo* info() const { return info_; }
- bool AllowAggressiveOptimizations() const;
+ bool AllowCodeMotion() const;
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -307,6 +301,7 @@
void InitializeInferredTypes();
void InsertTypeConversions();
void InsertRepresentationChanges();
+ void ComputeMinusZeroChecks();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void Canonicalize();
@@ -705,19 +700,17 @@
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument and wrap it in a PushArgument instruction.
- HValue* VisitArgument(Expression* expr);
+ // Visit an argument subexpression.
+ void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
- void PushArgumentsForStubCall(int argument_count);
-
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
- void ProcessCall(HCall* call);
+ void PreProcessCall(HCall* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@@ -748,7 +741,10 @@
bool TryArgumentsAccess(Property* expr);
bool TryCallApply(Call* expr);
bool TryInline(Call* expr);
- bool TryMathFunctionInline(Call* expr);
+ bool TryInlineBuiltinFunction(Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type);
void TraceInline(Handle<JSFunction> target, bool result);
void HandleGlobalVariableAssignment(Variable* var,
@@ -772,6 +768,8 @@
ZoneMapList* types,
Handle<String> name);
+ HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
@@ -785,6 +783,9 @@
HInstruction* BuildLoadKeyedFastElement(HValue* object,
HValue* key,
Property* expr);
+ HInstruction* BuildLoadKeyedPixelArrayElement(HValue* object,
+ HValue* key,
+ Property* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
@@ -817,6 +818,8 @@
HValue* switch_value,
CaseClause* clause);
+ HValue* BuildContextChainWalk(Variable* var);
+
void AddCheckConstantFunction(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 54cfb5c..d5fd7b8 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -49,20 +49,24 @@
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
}
}
@@ -111,6 +115,7 @@
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
@@ -141,6 +146,7 @@
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ CPU::FlushICache(pc_, sizeof(Address));
}
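Each patch site above now flushes the instruction cache over exactly the bytes it rewrites. On IA32 the flush is essentially a no-op, since the architecture keeps the instruction cache coherent with data writes, but spelling it out keeps the code-patching discipline uniform. A minimal sketch of the pattern (the Patch helper is hypothetical; CPU::FlushICache matches the call sites above):

    static void Patch(int32_t* p, int32_t delta) {
      *p -= delta;                            // Rewrite the entry in place.
      CPU::FlushICache(p, sizeof(uint32_t));  // Publish the written bytes to
                                              // instruction fetch.
    }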
@@ -189,12 +195,14 @@
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -214,12 +222,14 @@
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 552d7b5..e4d09f2 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2559,6 +2559,19 @@
}
+void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x22);
+ emit_sse_operand(dst, src);
+ EMIT(offset);
+}
+
+
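The emitted bytes 66 0F 3A 22 /r ib are the SSE4.1 encoding of PINSRD. A usage sketch under the surrounding MacroAssembler conventions (illustrative only; the scope guard mirrors the CpuFeatures assert in the emitter):

    CpuFeatures::Scope use_sse4_1(SSE4_1);
    __ pinsrd(xmm0, Operand(eax, 0), 1);  // Insert [eax] into lane 1 of xmm0.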
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 20446b0..11f324e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
@@ -64,30 +64,14 @@
// and best performance in optimized code.
//
struct Register {
- static const int kNumAllocatableRegisters = 5;
+ static const int kNumAllocatableRegisters = 6;
static const int kNumRegisters = 8;
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < 4 || reg.code() == 7);
- return (reg.code() == 7) ? 4 : reg.code();
- }
+ static inline const char* AllocationIndexToString(int index);
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return (index == 4) ? from_code(7) : from_code(index);
- }
+ static inline int ToAllocationIndex(Register reg);
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "eax",
- "ecx",
- "edx",
- "ebx",
- "edi"
- };
- return names[index];
- }
+ static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
Register r = { code };
@@ -110,6 +94,7 @@
int code_;
};
+
const Register eax = { 0 };
const Register ecx = { 1 };
const Register edx = { 2 };
@@ -121,6 +106,26 @@
const Register no_reg = { -1 };
+inline const char* Register::AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ // This is the mapping of allocation indices to registers.
+ const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
+ return kNames[index];
+}
+
+
+inline int Register::ToAllocationIndex(Register reg) {
+ ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
+ return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
+}
+
+
+inline Register Register::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return (index >= 4) ? from_code(index + 2) : from_code(index);
+}
+
+
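With esp (code 4) and ebp (code 5) excluded from allocation, the six allocatable registers fold into a dense index space: eax through ebx keep their codes, while esi and edi shift down by two. An illustrative round-trip check (not part of the patch):

    ASSERT(Register::ToAllocationIndex(ebx) == 3);  // Codes 0..3 map directly.
    ASSERT(Register::ToAllocationIndex(esi) == 4);  // 6 - 2.
    ASSERT(Register::ToAllocationIndex(edi) == 5);  // 7 - 2.
    ASSERT(Register::FromAllocationIndex(4).is(esi));
    ASSERT(Register::FromAllocationIndex(5).is(edi));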
struct XMMRegister {
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
@@ -928,6 +933,7 @@
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 72213dc..7d70ac3 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -38,6 +38,28 @@
namespace internal {
#define __ ACCESS_MASM(masm)
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in eax.
+ NearLabel check_heap_number, call_builtin;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_heap_number);
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &call_builtin);
+ __ ret(0);
+
+ __ bind(&call_builtin);
+ __ pop(ecx); // Pop return address.
+ __ push(eax);
+ __ push(ecx); // Push return address.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
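The new stub keeps values that are already numbers on a two-branch fast path and only reaches the TO_NUMBER builtin for everything else. As a pseudo-C++ sketch (IsSmi and IsHeapNumber stand in for the tag test and map compare; InvokeToNumberBuiltin is hypothetical):

    Object* ToNumberSketch(Object* value) {
      if (value->IsSmi()) return value;         // Smis are already numbers.
      if (value->IsHeapNumber()) return value;  // So are heap numbers.
      return InvokeToNumberBuiltin(value);      // Slow path for the rest.
    }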
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
@@ -1773,40 +1795,11 @@
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
- // If one of the arguments is a string, call the string add stub.
- // Otherwise, transition to the generic TRBinaryOpIC type.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- NearLabel left_not_string;
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
+ // Try to add the arguments as strings; otherwise, transition to the
+ // generic TRBinaryOpIC type.
+ GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
@@ -2347,36 +2340,8 @@
__ bind(&call_runtime);
switch (op_) {
case Token::ADD: {
+ GenerateAddStrings(masm);
GenerateRegisterArgsPush(masm);
- // Test for string arguments before calling runtime.
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- lhs = edx;
- rhs = eax;
-
- // Test if left operand is a string.
- NearLabel lhs_not_string;
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &lhs_not_string);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &lhs_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- __ TailCallStub(&string_add_left_stub);
-
- NearLabel call_add_runtime;
- // Left operand is not a string, test right.
- __ bind(&lhs_not_string);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &call_add_runtime);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_add_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_add_runtime);
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
}
@@ -2419,6 +2384,40 @@
}
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ NearLabel call_runtime;
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ NearLabel left_not_string;
+ __ test(left, Immediate(kSmiTagMask));
+ __ j(zero, &left_not_string);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &left_not_string);
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ test(right, Immediate(kSmiTagMask));
+ __ j(zero, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // Neither argument is a string.
+ __ bind(&call_runtime);
+}
+
+
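GenerateAddStrings never returns to its caller on the fast paths (each of them tail-calls a StringAddStub) and simply falls through when neither operand is a string, so every call site supplies its own continuation. The two call-site shapes introduced above, side by side:

    // In the string-typed stub:
    GenerateAddStrings(masm);        // Tail-calls on a string operand...
    GenerateTypeTransition(masm);    // ...otherwise falls through to here.

    // In the generic stub's Token::ADD case:
    GenerateAddStrings(masm);
    GenerateRegisterArgsPush(masm);
    __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);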
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
@@ -3500,10 +3499,12 @@
__ j(not_equal, ¬_minus_half);
// Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x))
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -3515,7 +3516,9 @@
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &call_runtime);
// Calculates square root.
- __ movsd(xmm1, xmm0);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
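Both hunks rely on the same IEEE 754 identity: under the default round-to-nearest mode, (+0) + (-0) is +0, and adding +0 leaves every other value unchanged. That matters because sqrtsd propagates the sign of a -0 input, while ES5 requires Math.pow(-0, 0.5) to be +0 (and, via the reciprocal path, Math.pow(-0, -0.5) to be +Infinity). A scalar sketch of the xorpd/addsd pair:

    double CanonicalizeZero(double x) {
      double r = 0.0;  // xorpd xmm1, xmm1
      r += x;          // addsd xmm1, xmm0: turns -0 into +0, keeps the rest.
      return r;        // sqrtsd now sees +0 for a -0 input.
    }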
@@ -4661,8 +4664,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
+ bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -4721,6 +4723,23 @@
__ test(ecx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned, not_taken);
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned some failure value.
+ if (FLAG_debug_code) {
+ __ push(edx);
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::the_hole_value_location()));
+ NearLabel okay;
+ __ cmp(edx, Operand::StaticVariable(pending_exception_address));
+ // Cannot use Check here, as it attempts to generate a call into the runtime.
+ __ j(equal, &okay);
+ __ int3();
+ __ bind(&okay);
+ __ pop(edx);
+ }
+
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
@@ -4739,7 +4758,6 @@
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
__ mov(eax, Operand::StaticVariable(pending_exception_address));
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@@ -6509,6 +6527,54 @@
}
+// Loads an indexed element from a pixel array.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // elements - is set to the receiver's elements if
+ // the receiver doesn't have a pixel array or the
+ // key is not a smi, otherwise it's the elements'
+ // external pointer.
+ // untagged_key - is set to the untagged key.
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check is
+ // generated to ensure that the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ mov(untagged_key, key);
+ __ SmiUntag(untagged_key);
+
+ // Verify that the receiver has pixel array elements.
+ __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+ // Key must be in range.
+ __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // Unsigned comparison handles negative keys.
+
+ // Perform the indexed load and tag the result as a smi.
+ __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
+ __ SmiTag(result);
+ __ ret(0);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 4a56d0d..2064574 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -308,6 +308,7 @@
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
@@ -489,6 +490,25 @@
};
+// Generates code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If the key is not a
+// smi, the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// the key, so that no smi check is generated. If the key is not a valid
+// index within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
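A C++-level sketch of the fast path the helper emits, with hypothetical functions standing in for the three exit labels documented above (the generated code keeps everything in registers and produces the smi via movzx_b plus SmiTag):

    Object* FastPixelArrayLoadSketch(JSObject* receiver, Object* key) {
      if (!key->IsSmi()) return KeyNotSmi();                  // key_not_smi
      int index = Smi::cast(key)->value();                    // SmiUntag
      Object* elements = receiver->elements();
      if (!elements->IsPixelArray()) return NotPixelArray();  // not_pixel_array
      PixelArray* pixels = PixelArray::cast(elements);
      if (static_cast<unsigned>(index) >=
          static_cast<unsigned>(pixels->length())) {
        return OutOfRange();                                  // out_of_range
      }
      return Smi::FromInt(pixels->get(index));                // Load and tag.
    }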
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 1ecfd39..b977db8 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -3771,14 +3771,15 @@
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, ecx);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -5587,7 +5588,8 @@
Load(property->value());
if (property->emit_store()) {
Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false);
+ frame_->CallStoreIC(Handle<String>::cast(key), false,
+ strict_mode_flag());
// A test eax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
@@ -6102,9 +6104,12 @@
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -6121,8 +6126,11 @@
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
@@ -7951,10 +7959,12 @@
__ j(not_equal, ¬_minus_half);
// Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x))
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -7966,7 +7976,9 @@
__ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal);
// Calculates square root.
- __ movsd(xmm1, xmm0);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
JumpTarget done;
@@ -8230,19 +8242,13 @@
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
+ // Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(variable->name()));
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(Immediate(variable->name()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}
@@ -9666,7 +9672,7 @@
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
@@ -9750,7 +9756,7 @@
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test eax
// instruction.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 46b12cb..27e3396 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -365,6 +365,7 @@
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 3050c56..a646052 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -37,9 +37,24 @@
namespace v8 {
namespace internal {
-
int Deoptimizer::table_entry_size_ = 10;
+
+int Deoptimizer::patch_size() {
+ return Assembler::kCallInstructionLength;
+}
+
+
+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+ ASSERT(start <= end);
+ int size = end - start;
+ CodePatcher destroyer(start, size);
+ while (size-- > 0) destroyer.masm()->int3();
+#endif
+}
+
+
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
AssertNoAllocation no_allocation;
@@ -47,48 +62,61 @@
// Get the optimized code.
Code* code = function->code();
+ Address code_start_address = code->instruction_start();
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
+ // We will overwrite the code's relocation info in-place. Relocation info
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
+ ByteArray* reloc_info = code->relocation_info();
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
- // For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
+ // For each return after a safepoint, insert a call to the corresponding
+ // deoptimization entry. Since the call uses a relative encoding, write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ SafepointTable table(code);
+ Address prev_address = code_start_address;
+ for (unsigned i = 0; i < table.length(); ++i) {
+ Address curr_address = code_start_address + table.GetPcOffset(i);
+ ZapCodeRange(prev_address, curr_address);
+
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = pc_offset - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
- last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- CodePatcher patcher(
- code->instruction_start() + pc_offset + gap_code_size,
- Assembler::kCallInstructionLength);
- patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
- RelocInfo::NONE);
- last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+ // The gap code is needed to get to the state expected at the bailout.
+ curr_address += safepoint_entry.gap_code_size();
+
+ CodePatcher patcher(curr_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry));
+ reloc_info_writer.Write(&rinfo);
+
+ curr_address += patch_size();
}
+ prev_address = curr_address;
}
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
+ ZapCodeRange(prev_address,
+ code_start_address + code->safepoint_table_offset());
+
+ // Move the relocation info to the beginning of the byte array.
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+
+ // The relocation info is in place, update the size.
+ reloc_info->set_length(new_reloc_size);
+
+ // Handle the junk part after the new relocation info. We will create
+ // a non-live object in the extra space at the end of the former reloc info.
+ Address junk_address = reloc_info->address() + reloc_info->Size();
+ ASSERT(junk_address <= reloc_end_address);
+ Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
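The rewrite exploits the fact that RelocInfoWriter emits entries backward from the end of the byte array: fresh RUNTIME_ENTRY records accumulate at the tail while the stale data at the front is simply abandoned, then the new data is slid to the start and the leftover tail is covered with a filler object so the heap stays walkable. Schematically:

    // ByteArray holding the reloc info, during and after the rewrite:
    //
    //   while writing:  [header | stale bytes ... | new entries, grow <-]
    //                            ^writer emits backward toward the front
    //   after memmove:  [header | new entries | filler object]
    //
    //   new_reloc_size = reloc_end_address - reloc_info_writer.pos()
    //   filler size    = reloc_end_address - junk_address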
@@ -106,44 +134,53 @@
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
- Code* replacement_code) {
- // The stack check code matches the pattern:
- //
- // cmp esp, <limit>
- // jae ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp esp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
- Address call_target_address = rinfo->pc();
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
- rinfo->set_target_address(replacement_code->entry());
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ Address call_target_address = pc_after - kPointerSize;
+ ASSERT(check_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // The stack check code matches the pattern:
+ //
+ // cmp esp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test eax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp esp, <limit> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // test eax, <loop nesting depth>
+ // ok:
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x07 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
}
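The offsets line up because call_target_address points at the call's 32-bit displacement, kPointerSize bytes before pc_after, so the opcode bytes sit at offsets -3 through -1. The patch in byte form (illustrative):

    //   before:  73 07            jae ok
    //            e8 xx xx xx xx   call <stack guard>
    //   after:   90 90            nop nop
    //            e8 yy yy yy yy   call <on-stack replacement>
    //
    // The rel32 is rewritten via Assembler::set_target_address_at because it
    // is pc-relative; RevertStackCheckCodeAt restores the 73 07 bytes.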
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ Address call_target_address = pc_after - kPointerSize;
+ ASSERT(replacement_code->entry() ==
+ Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- Address call_target_address = rinfo->pc();
ASSERT(*(call_target_address - 3) == 0x90 && // nop
*(call_target_address - 2) == 0x90 && // nop
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
- rinfo->set_target_address(check_code->entry());
+ Assembler::set_target_address_at(call_target_address,
+ check_code->entry());
}
@@ -507,26 +544,25 @@
__ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
- __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
- __ mov(Operand(ebx, offset), ecx);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(ebx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0);
}
- // Remove the bailout id and the general purpose registers from the stack.
+ // Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
@@ -591,7 +627,7 @@
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(ebx, offset));
}
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 4028a93..e0cbe35 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1115,10 +1115,20 @@
get_modrm(*data, &mod, ®op, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
- NameOfXMMRegister(regop),
+ NameOfCPURegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x22) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else {
UnimplementedInstruction();
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 2622b5e..18c9319 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -47,8 +47,7 @@
class JumpPatchSite BASE_EMBEDDED {
public:
- explicit JumpPatchSite(MacroAssembler* masm)
- : masm_(masm) {
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
@@ -60,7 +59,7 @@
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
+ EmitJump(not_carry, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {
@@ -206,45 +205,48 @@
Move(dot_arguments_slot, ecx, ebx, edx);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ NearLabel ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, taken);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
-
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
EmitReturnSequence();
}
@@ -307,12 +309,14 @@
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
+
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -610,7 +614,8 @@
__ mov(location, src);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ int offset = Context::SlotOffset(dst->index());
+ ASSERT(!scratch1.is(esi) && !src.is(esi) && !scratch2.is(esi));
__ RecordWrite(scratch1, offset, src, scratch2);
}
}
@@ -666,10 +671,11 @@
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
@@ -713,18 +719,25 @@
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- VisitForStackValue(prop->obj());
- if (function != NULL) {
- VisitForStackValue(prop->key());
- VisitForAccumulatorValue(function);
- __ pop(ecx);
- } else {
- VisitForAccumulatorValue(prop->key());
- __ mov(ecx, result_register());
- __ mov(result_register(), Factory::the_hole_value());
+ // property. Use (keyed) IC to set the initial value. We cannot
+ // visit the rewrite because it's shared and we risk recording
+ // duplicate AST IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- __ pop(edx);
+
+ if (function != NULL) {
+ __ push(eax);
+ VisitForAccumulatorValue(function);
+ __ pop(edx);
+ } else {
+ __ mov(edx, eax);
+ __ mov(eax, Factory::the_hole_value());
+ }
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -764,6 +777,8 @@
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
+ clause->body_target()->entry_label()->Unuse();
+
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@@ -1122,8 +1137,11 @@
// Check that last extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
- __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an esi-based operand (the write barrier cannot be allowed to
+ // destroy the esi register).
+ return ContextOperand(context, slot->index());
}
@@ -1995,60 +2013,80 @@
// ecx, and the global object on the stack.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ __ mov(edx, Operand(ebp, SlotOffset(slot)));
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &skip);
+ __ mov(Operand(ebp, SlotOffset(slot)), eax);
+ break;
+ case Slot::CONTEXT: {
+ __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ mov(edx, ContextOperand(ecx, slot->index()));
+ __ cmp(edx, Factory::the_hole_value());
+ __ j(not_equal, &skip);
+ __ mov(ContextOperand(ecx, slot->index()), eax);
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(edx, eax); // Preserve the stored value in eax.
+ __ RecordWrite(ecx, offset, edx, ebx);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ mov(edx, Operand(ebp, SlotOffset(slot)));
- __ cmp(edx, Factory::the_hole_value());
- __ j(not_equal, &done);
- }
// Perform the assignment.
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, ecx);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ mov(edx, target);
- __ cmp(edx, Factory::the_hole_value());
- __ j(not_equal, &done);
- }
// Perform the assignment and issue the write barrier.
__ mov(target, eax);
// The value of the assignment is in eax. RecordWrite clobbers its
// register arguments.
__ mov(edx, eax);
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ int offset = Context::SlotOffset(slot->index());
__ RecordWrite(ecx, offset, edx, ebx);
break;
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -2268,7 +2306,9 @@
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -2339,16 +2379,21 @@
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
if (prop->is_synthetic()) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForAccumulatorValue(prop->key());
- }
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, edx);
+ __ mov(edx, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ mov(eax, prop->key()->AsLiteral()->handle());
+
// Record source code position for IC call.
SetSourcePosition(prop->position());
- __ pop(edx); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -2359,6 +2404,9 @@
__ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
} else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
}
@@ -3675,26 +3723,29 @@
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
- } else {
- // Property or variable reference. Call the delete builtin with
- // object and property name as arguments.
- if (prop != NULL) {
+ } else if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ push(Immediate(var->name()));
- } else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(eax);
- __ push(Immediate(var->name()));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
}
+ } else if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ push(Immediate(var->name()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(eax);
}
break;
}
@@ -3738,8 +3789,8 @@
Label no_conversion;
__ test(result_register(), Immediate(kSmiTagMask));
__ j(zero, &no_conversion);
- __ push(result_register());
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
context()->Plug(result_register());
break;
@@ -3859,8 +3910,8 @@
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &no_conversion);
}
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
// Save result for postfix expressions.
@@ -3913,8 +3964,7 @@
// Call stub for +1/-1.
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- TypeRecordingBinaryOpStub stub(expr->binary_op(),
- NO_OVERWRITE);
+ TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index f570fe0..76681ce 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -388,7 +388,8 @@
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm,
+ bool support_wrappers) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@@ -396,7 +397,8 @@
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
+ support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -554,19 +556,15 @@
__ ret(0);
__ bind(&check_pixel_array);
- // Check whether the elements is a pixel array.
- // edx: receiver
- // eax: key
- __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- __ mov(ebx, eax);
- __ SmiUntag(ebx);
- __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
- __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
- __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
- __ SmiTag(eax);
- __ ret(0);
+ GenerateFastPixelArrayLoad(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ eax,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
@@ -718,160 +716,6 @@
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, failed_allocation;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
-
- __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow, not_taken);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(map));
- __ j(not_equal, &slow, not_taken);
-
- // eax: key, known to be a smi.
- // edx: receiver, known to be a JSObject.
- // ebx: elements object, known to be an external array.
- // Check that the index is in range.
- __ mov(ecx, eax);
- __ SmiUntag(ecx); // Untag the index.
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(ecx, Operand(ebx, ecx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ fld_s(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // ecx: value
- // For floating-point array type:
- // FP(0): value
-
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (array_type == kExternalIntArray) {
- __ cmp(ecx, 0xC0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(ecx, Immediate(0xC0000000));
- __ j(not_zero, &box_int);
- }
-
- __ mov(eax, ecx);
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (array_type == kExternalIntArray) {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- } else {
- ASSERT(array_type == kExternalUnsignedIntArray);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(ecx);
- __ fild_d(Operand(esp, 0));
- __ pop(ecx);
- __ pop(ecx);
- }
- // FP(0): value
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ mov(eax, ecx);
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ ffree();
- __ fincstp();
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -1031,194 +875,6 @@
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
- // Get the instance type from the map of the receiver.
- __ CmpInstanceType(edi, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // eax: value
- // edx: receiver, a JSObject
- // ecx: key, a smi
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
- &slow, true);
-
- // Check that the index is in range.
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_equal, &check_heap_number);
- // smi case
- __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
- __ SmiUntag(ecx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ecx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ebx: untagged index
- // edi: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- __ ret(0);
- } else {
- // Need to perform float-to-int conversion.
- // Test the top of the FP stack for NaN.
- Label is_nan;
- __ fucomi(0);
- __ j(parity_even, &is_nan);
-
- if (array_type != kExternalUnsignedIntArray) {
- __ push(ecx); // Make room on stack
- __ fistp_s(Operand(esp, 0));
- __ pop(ecx);
- } else {
- // fistp stores values as signed integers.
- // To represent the entire range, we need to store as a 64-bit
- // int and discard the high 32 bits.
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ fistp_d(Operand(esp, 0));
- __ pop(ecx);
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- // ecx: untagged integer value
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // We also need to explicitly check for +/-Infinity. These are
- // converted to MIN_INT, but we need to be careful not to
- // confuse them with legal uses of MIN_INT.
- Label not_infinity;
- // This test would apparently detect both NaN and Infinity,
- // but we've already checked for NaN using the FPU hardware
- // above.
- __ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
- __ and_(edx, 0x7FF0);
- __ cmp(edx, 0x7FF0);
- __ j(not_equal, &not_infinity);
- __ mov(ecx, 0);
- __ bind(&not_infinity);
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return original value.
-
- __ bind(&is_nan);
- __ ffree();
- __ fincstp();
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), 0);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ Set(ecx, Immediate(0));
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm);
-}
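Aside (illustrative C++ only, not V8 code): the comment above about fistp storing signed integers is the whole reason for the 64-bit detour in the unsigned-int case. Converting through a signed 64-bit integer and keeping only the low 32 bits — the fistp_d-plus-pop sequence — covers the full uint32 range:

    #include <cstdint>
    #include <cstdio>

    int main() {
      double v = 4294967295.0;                        // UINT32_MAX, out of int32 range
      int64_t wide = static_cast<int64_t>(v);         // like fistp_d into a 64-bit stack slot
      uint32_t stored = static_cast<uint32_t>(wide);  // discard the high 32 bits
      std::printf("%u\n", stored);                    // prints 4294967295
      return 0;
    }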
-
-
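Aside (standalone sketch; std::memcpy stands in for the raw 16-bit field read at offset +6): the removed Infinity check works because a double's top 16 bits hold the sign bit and the 11-bit exponent, so a masked value of 0x7FF0 means an all-ones exponent — Infinity or NaN, with NaN already ruled out by the preceding FPU test:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      double v = std::numeric_limits<double>::infinity();
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      uint16_t top = static_cast<uint16_t>(bits >> 48);  // the mov_w at HeapNumber value + 6
      std::printf("%d\n", (top & 0x7FF0) == 0x7FF0);     // prints 1: exponent all ones
      return 0;
    }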
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
@@ -1832,7 +1488,8 @@
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1842,7 +1499,8 @@
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
- MONOMORPHIC);
+ MONOMORPHIC,
+ extra_ic_state);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 2d3eac1..7724f1b 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -37,6 +37,8 @@
namespace internal {
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public PostCallGenerator {
public:
SafepointGenerator(LCodeGen* codegen,
@@ -75,7 +77,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -155,6 +157,8 @@
// Trace the call.
if (FLAG_trace) {
+ // We have not executed any compiled code yet, so esi still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -365,18 +369,16 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr) {
- if (instr != NULL) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RegisterLazyDeoptimization(instr);
- } else {
- LPointerMap no_pointers(0);
- RecordPosition(no_pointers.position());
- __ call(code, mode);
- RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+ LInstruction* instr,
+ bool adjusted) {
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ if (!adjusted) {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
+ __ call(code, mode);
+ RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -387,26 +389,20 @@
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
+void LCodeGen::CallRuntime(Runtime::Function* fun,
+ int argc,
+ LInstruction* instr,
+ bool adjusted) {
ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
- // Runtime calls to Throw are not supposed to ever return at the
- // call site, so don't register lazy deoptimization for these. We do
- // however have to record a safepoint since throwing exceptions can
- // cause garbage collections.
- // BUG(3243555): register a lazy deoptimization point at throw. We need
- // it to be able to inline functions containing a throw statement.
- if (!instr->IsThrow()) {
- RegisterLazyDeoptimization(instr);
- } else {
- RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+ if (!adjusted) {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
+ __ CallRuntime(fun, argc);
+ RegisterLazyDeoptimization(instr);
}
@@ -566,37 +562,36 @@
}
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- deoptimization_index);
+ kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
}
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegisters(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register esi always contains a pointer to the context.
- safepoint.DefinePointerRegister(esi);
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
}
@@ -646,6 +641,7 @@
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -815,7 +811,7 @@
__ test(left, Operand(left));
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+ if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -956,19 +952,31 @@
if (BitCast<uint64_t, double>(v) == 0) {
__ xorpd(res, res);
} else {
- int32_t v_int32 = static_cast<int32_t>(v);
- if (static_cast<double>(v_int32) == v) {
- __ push_imm32(v_int32);
- __ cvtsi2sd(res, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kPointerSize));
+ Register temp = ToRegister(instr->TempAt(0));
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(res, Operand(temp));
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ } else {
+ __ xorpd(res, res);
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ }
} else {
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- __ push_imm32(upper);
- __ push_imm32(lower);
- __ movdbl(res, Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ Set(temp, Immediate(upper));
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(xmm0, Operand(temp));
+ __ por(res, xmm0);
+ }
}
}
}
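Aside (a standalone sketch; std::memcpy stands in for V8's BitCast): the new constant-materialization path above splits the double's bit pattern into two 32-bit halves and assembles them in the XMM register with movd plus pinsrd (SSE4.1) or psllq/por otherwise. The split itself amounts to:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;  // bit pattern 0x3FF8000000000000
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);                 // BitCast<uint64_t, double>(v)
      uint32_t lower = static_cast<uint32_t>(bits);        // inserted via movd
      uint32_t upper = static_cast<uint32_t>(bits >> 32);  // inserted via pinsrd or psllq/por
      std::printf("lower=0x%08x upper=0x%08x\n", lower, upper);
      return 0;
    }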
@@ -994,6 +1002,13 @@
}
+void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ mov(result, FieldOperand(array, PixelArray::kLengthOffset));
+}
+
+
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1022,7 +1037,7 @@
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr);
+ CallRuntime(Runtime::kThrow, 1, instr, false);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@@ -1094,7 +1109,7 @@
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@@ -1207,6 +1222,7 @@
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
__ pushad();
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -1285,11 +1301,11 @@
NearLabel done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+ __ mov(ToRegister(result), Factory::true_value());
__ j(cc, &done);
__ bind(&unordered);
- __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+ __ mov(ToRegister(result), Factory::false_value());
__ bind(&done);
}
@@ -1320,10 +1336,10 @@
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
@@ -1348,10 +1364,10 @@
__ cmp(reg, Factory::null_value());
if (instr->is_strict()) {
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
} else {
NearLabel true_value, false_value, done;
@@ -1368,10 +1384,10 @@
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value);
__ bind(&false_value);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
}
@@ -1447,11 +1463,11 @@
__ j(true_cond, &is_true);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&is_true);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
@@ -1479,10 +1495,10 @@
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ test(input, Immediate(kSmiTagMask));
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(zero, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
@@ -1507,7 +1523,6 @@
}
-
static Condition BranchCondition(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1529,10 +1544,10 @@
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
@@ -1559,12 +1574,12 @@
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
NearLabel done;
__ j(not_zero, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
@@ -1653,11 +1668,11 @@
__ j(not_equal, &is_false);
__ bind(&is_true);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
@@ -1698,6 +1713,7 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
+ ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1713,6 +1729,7 @@
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1806,12 +1823,13 @@
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(edi));
__ mov(InstanceofStub::right(), Immediate(instr->function()));
- static const int kAdditionalDelta = 13;
+ static const int kAdditionalDelta = 16;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->SizeOfCodeGeneratedSince(&before_push_delta));
@@ -1848,7 +1866,7 @@
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@@ -1871,7 +1889,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
@@ -1886,14 +1904,17 @@
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
+ // Preserve the return value on the stack and rely on the runtime call
+ // to return the value in the same register. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so
+ // it's safe to write to the context register.
__ push(eax);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((ParameterCount() + 1) * kPointerSize);
+ __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
}
@@ -1909,15 +1930,38 @@
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
- __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+ Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->check_hole_value()) {
+ __ cmp(cell_operand, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ // Store the value.
+ __ mov(cell_operand, value);
}
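Aside (a hypothetical sketch of the sentinel pattern; the types below are stand-ins, not V8's cell representation): a deleted global leaves a distinguished hole value in its cell, and the optimized store compares against that sentinel and bails out rather than silently resurrecting the property:

    #include <cstdio>

    struct Object {};          // hypothetical stand-in
    Object the_hole;           // analogue of Factory::the_hole_value()

    // Returns false where the generated code would deoptimize.
    bool TryOptimizedStore(Object** cell, Object* value) {
      if (*cell == &the_hole) return false;  // cell was deleted: defer to runtime
      *cell = value;                         // store the value
      return true;
    }

    int main() {
      Object v;
      Object* cell = &the_hole;
      std::printf("%d\n", TryOptimizedStore(&cell, &v));  // 0: deopt path
      cell = &v;  // pretend the runtime repaired the cell
      std::printf("%d\n", TryOptimizedStore(&cell, &v));  // 1: fast path
      return 0;
    }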
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- // TODO(antonm): load a context with a separate instruction.
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadContext(result, instr->context_chain_length());
- __ mov(result, ContextOperand(result, instr->slot_index()));
+ __ mov(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ mov(ContextOperand(context, instr->slot_index()), value);
+ if (instr->needs_write_barrier()) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, offset, value, temp);
+ }
}
@@ -1934,6 +1978,7 @@
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
@@ -1986,22 +2031,33 @@
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- ASSERT(instr->result()->Equals(instr->InputAt(0)));
- Register reg = ToRegister(instr->InputAt(0));
- __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
NearLabel done;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(equal, &done);
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ j(equal, &done);
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::fixed_cow_array_map()));
- __ Check(equal, "Check for fast elements failed.");
+ __ Check(equal, "Check for fast elements or pixel array failed.");
__ bind(&done);
}
}
+void LCodeGen::DoLoadPixelArrayExternalPointer(
+ LLoadPixelArrayExternalPointer* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ mov(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
+}
+
+
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
@@ -2024,7 +2080,10 @@
ASSERT(result.is(elements));
// Load the result.
- __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+ __ mov(result, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
// Check for the hole value.
__ cmp(result, Factory::the_hole_value());
@@ -2032,7 +2091,19 @@
}
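Aside (illustrative address arithmetic; kHeaderSize here is an assumed stand-in for FixedArray::kHeaderSize): switching from times_4 to times_pointer_size makes the scaled-index operand spell out what it computes — element i of a FixedArray lives at the array base plus the header plus i pointer-sized words:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kHeaderSize = 2 * sizeof(void*);  // assumed: map word + length word
      uintptr_t elements = 0x1000;                      // pretend untagged FixedArray base
      int key = 3;
      // Mirrors FieldOperand(elements, key, times_pointer_size, kHeaderSize).
      uintptr_t slot = elements + key * sizeof(void*) + kHeaderSize;
      std::printf("slot at 0x%zx\n", static_cast<size_t>(slot));
      return 0;
    }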
+void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
+ Register external_elements = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(external_elements));
+
+ // Load the result.
+ __ movzx_b(result, Operand(external_elements, key, times_1, 0));
+}
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(eax));
@@ -2090,24 +2161,36 @@
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
- ASSERT(ToRegister(instr->function()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- // If the receiver is null or undefined, we have to pass the
- // global object as a receiver.
- NearLabel global_receiver, receiver_ok;
- __ cmp(receiver, Factory::null_value());
- __ j(equal, &global_receiver);
- __ cmp(receiver, Factory::undefined_value());
- __ j(not_equal, &receiver_ok);
- __ bind(&global_receiver);
- __ mov(receiver, GlobalObjectOperand());
- __ bind(&receiver_ok);
-
+ Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
+ Register scratch = ToRegister(instr->TempAt(0));
+ ASSERT(receiver.is(eax)); // Used for parameter count.
+ ASSERT(function.is(edi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(eax));
- Label invoke;
+ // If the receiver is null or undefined, we have to pass the global object
+ // as a receiver.
+ NearLabel global_object, receiver_ok;
+ __ cmp(receiver, Factory::null_value());
+ __ j(equal, &global_object);
+ __ cmp(receiver, Factory::undefined_value());
+ __ j(equal, &global_object);
+
+ // The receiver should be a JS object.
+ __ test(receiver, Immediate(kSmiTagMask));
+ DeoptimizeIf(equal, instr->environment());
+ __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
+ DeoptimizeIf(below, instr->environment());
+ __ jmp(&receiver_ok);
+
+ __ bind(&global_object);
+ // TODO(kmillikin): We have a hydrogen value for the global object. See
+ // if it's better to use it than to explicitly fetch it from the context
+ // here.
+ __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -2120,7 +2203,7 @@
// Loop through the arguments pushing them onto the execution
// stack.
- Label loop;
+ NearLabel invoke, loop;
// length is a small non-negative integer, due to the test above.
__ test(length, Operand(length));
__ j(zero, &invoke);
@@ -2131,12 +2214,16 @@
// Invoke the function.
__ bind(&invoke);
- ASSERT(receiver.is(eax));
- v8::internal::ParameterCount actual(eax);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
- instr->pointer_map(),
- Safepoint::kNoDeoptimizationIndex);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+ pointers,
+ env->deoptimization_index());
+ v8::internal::ParameterCount actual(eax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
@@ -2150,16 +2237,31 @@
}
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2173,6 +2275,8 @@
(scope()->num_heap_slots() > 0);
if (change_context) {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ } else {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
// Set eax to arguments count if adaption is not needed. Assumes that eax
@@ -2193,9 +2297,6 @@
// Setup deoptimization.
RegisterLazyDeoptimization(instr);
-
- // Restore context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -2221,11 +2322,12 @@
Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive,
- // just return it.
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
__ j(not_zero, &negative);
- __ mov(tmp, input_reg);
__ jmp(&done);
__ bind(&negative);
@@ -2237,6 +2339,7 @@
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -2252,14 +2355,25 @@
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
-
- __ bind(&done);
__ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+ __ bind(&done);
__ PopSafepointRegisters();
}
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ test(input_reg, Operand(input_reg));
+ Label is_positive;
+ __ j(not_sign, &is_positive);
+ __ neg(input_reg);
+ __ test(input_reg, Operand(input_reg));
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
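Aside (portable C++ stand-in; the negation goes through unsigned arithmetic because signed overflow is undefined in C++): EmitIntegerMathAbs deoptimizes when neg leaves the value negative, because -INT32_MIN has no int32 representation — two's-complement negation wraps it back to itself:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t x = INT32_MIN;
      // What the neg instruction does, expressed without signed overflow.
      int32_t negated = static_cast<int32_t>(0u - static_cast<uint32_t>(x));
      std::printf("%d\n", negated);  // still -2147483648, so the stub must bail out
      return 0;
    }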
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
@@ -2284,31 +2398,15 @@
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
+ EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
- Label not_smi;
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ test(input_reg, Immediate(kSmiTagMask));
__ j(not_zero, deferred->entry());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
-
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
-
- __ bind(&is_positive);
+ EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
}
@@ -2387,6 +2485,8 @@
__ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
__ ucomisd(xmm_scratch, input_reg);
DeoptimizeIf(equal, instr->environment());
+ __ xorpd(xmm_scratch, xmm_scratch);
+ __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
}
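Aside (standalone sketch of the IEEE-754 corner case): the xorpd/addsd pair added above exists because sqrt(-0.0) is -0.0, while -0.0 + +0.0 is +0.0 under the default rounding mode, so adding a positive zero first makes sqrtsd return +0:

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      std::printf("%g\n", std::sqrt(neg_zero));        // -0: IEEE-754 keeps the sign
      std::printf("%g\n", std::sqrt(neg_zero + 0.0));  // 0: matches the addsd fix
      return 0;
    }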
@@ -2451,7 +2551,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@@ -2459,7 +2559,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@@ -2467,7 +2567,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@@ -2505,46 +2605,46 @@
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->InputAt(0)).is(ecx));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -2556,7 +2656,8 @@
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(edi));
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
@@ -2566,7 +2667,7 @@
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, false);
}
@@ -2601,6 +2702,7 @@
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->value()).is(eax));
@@ -2629,19 +2731,27 @@
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements, offset), value);
} else {
- __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+ __ mov(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
value);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
- __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+ __ lea(key,
+ FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
}
}
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
@@ -2651,6 +2761,152 @@
}
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ NearLabel flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ test(result, Immediate(kIsConsStringMask));
+ __ j(zero, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, deferred->entry());
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzx_w(result,
+ FieldOperand(string,
+ SeqTwoByteString::kHeaderSize + 2 * const_index));
+ } else {
+ __ movzx_w(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzx_b(result, FieldOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ movzx_b(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, Immediate(0));
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ push(Immediate(Smi::FromInt(const_index)));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(eax);
+ }
+ __ SmiUntag(eax);
+ __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
+ __ PopSafepointRegisters();
+}
+
+
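Aside (assumes the 32-bit smi encoding the STATIC_ASSERTs above rely on, with kSmiTagSize == 1 and kSmiTag == 0): tagging an index as a smi is a single left shift and untagging is the arithmetic shift back, which is why SmiTag/SmiUntag can bracket the runtime call so cheaply:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t index = 42;
      int32_t smi = index << 1;  // SmiTag: low bit 0 marks a smi
      int32_t back = smi >> 1;   // SmiUntag: arithmetic shift restores the value
      std::printf("smi=0x%x value=%d\n", smi, back);  // smi=0x54 value=42
      return 0;
    }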
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ __ mov(result, FieldOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -2709,6 +2965,7 @@
// integer value.
__ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -2756,6 +3013,7 @@
__ Set(reg, Immediate(0));
__ PushSafepointRegisters();
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -3077,13 +3335,19 @@
InstanceType last = instr->hydrogen()->last();
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
// If there is only one type in the interval check for equality.
if (first == last) {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
DeoptimizeIf(not_equal, instr->environment());
- } else {
+ } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+ // String has a dedicated bit in instance type.
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
DeoptimizeIf(below, instr->environment());
// Omit check for the last type.
if (last != LAST_TYPE) {
@@ -3165,21 +3429,22 @@
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
} else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
// Setup the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
@@ -3217,7 +3482,7 @@
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -3229,7 +3494,7 @@
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
__ pop(ebx);
__ bind(&allocated);
@@ -3252,18 +3517,18 @@
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = !instr->hydrogen()->pretenure();
+ bool pretenure = instr->hydrogen()->pretenure();
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
} else {
- __ push(esi);
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? Factory::true_value()
: Factory::false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kNewClosure, 3, instr, false);
}
}
@@ -3275,7 +3540,7 @@
} else {
__ push(ToOperand(input));
}
- CallRuntime(Runtime::kTypeof, 1, instr);
+ CallRuntime(Runtime::kTypeof, 1, instr, false);
}
@@ -3292,11 +3557,11 @@
instr->type_literal());
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&true_label);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
@@ -3341,9 +3606,9 @@
final_branch_condition = below;
} else if (type_name->Equals(Heap::boolean_symbol())) {
- __ cmp(input, Handle<Object>(Heap::true_value()));
+ __ cmp(input, Factory::true_value());
__ j(equal, true_label);
- __ cmp(input, Handle<Object>(Heap::false_value()));
+ __ cmp(input, Factory::false_value());
final_branch_condition = equal;
} else if (type_name->Equals(Heap::undefined_symbol())) {
@@ -3394,6 +3659,53 @@
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register result = ToRegister(instr->result());
+ NearLabel true_label;
+ NearLabel false_label;
+ NearLabel done;
+
+ EmitIsConstructCall(result);
+ __ j(equal, &true_label);
+
+ __ mov(result, Factory::false_value());
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ mov(result, Factory::true_value());
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ NearLabel check_frame_marker;
+ __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &check_frame_marker);
+ __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
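Aside (a simplified, hypothetical model; the struct and marker values below only mimic the two frame slots the code reads): EmitIsConstructCall walks one frame up through the saved frame pointer, hops over an arguments-adaptor frame if its context slot holds the adaptor marker, and then compares the frame's marker slot against the CONSTRUCT marker:

    #include <cstdio>

    struct Frame {         // hypothetical layout, not V8's real frame
      Frame* caller_fp;    // slot at kCallerFPOffset
      int context_slot;    // slot at kContextOffset (holds a marker in adaptor frames)
      int marker_slot;     // slot at kMarkerOffset
    };

    const int kAdaptorMarker = 4 << 1;    // stand-in for Smi::FromInt(ARGUMENTS_ADAPTOR)
    const int kConstructMarker = 2 << 1;  // stand-in for Smi::FromInt(CONSTRUCT)

    bool IsConstructCall(const Frame* fp) {
      const Frame* caller = fp->caller_fp;
      if (caller->context_slot == kAdaptorMarker) {
        caller = caller->caller_fp;  // skip the arguments adaptor frame
      }
      return caller->marker_slot == kConstructMarker;
    }

    int main() {
      Frame construct = { nullptr, 0, kConstructMarker };
      Frame callee = { &construct, 0, 0 };
      std::printf("%d\n", IsConstructCall(&callee));  // prints 1
      return 0;
    }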
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -3414,10 +3726,15 @@
} else {
__ push(ToOperand(key));
}
- RecordPosition(instr->pointer_map()->position());
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
- instr->pointer_map(),
- Safepoint::kNoDeoptimizationIndex);
+ pointers,
+ env->deoptimization_index());
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
@@ -3430,7 +3747,7 @@
__ j(above_equal, &done);
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
__ bind(&done);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index ab62e6f..3ac3a41 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -92,6 +92,7 @@
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -148,17 +149,14 @@
bool GenerateDeferredCode();
bool GenerateSafepointTable();
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
- void CallRuntime(Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
+ void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
+ bool adjusted = true);
+ void CallRuntime(Runtime::Function* fun, int argc, LInstruction* instr,
+ bool adjusted = true);
+ void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
+ bool adjusted = true) {
Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
+ CallRuntime(function, argc, instr, adjusted);
}
// Generate a direct call to a known function. Expects the function
@@ -186,6 +184,7 @@
int ToInteger32(LConstantOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
@@ -196,6 +195,10 @@
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -223,6 +226,11 @@
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp);
+
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 8886959..45c790f 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -32,12 +32,11 @@
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), spilled_register_(-1) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- source_uses_[i] = 0;
- destination_uses_[i] = 0;
- }
-}
+ : cgen_(owner),
+ moves_(32),
+ source_uses_(),
+ destination_uses_(),
+ spilled_register_(-1) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
index f0bd260..0c81d72 100644
--- a/src/ia32/lithium-gap-resolver-ia32.h
+++ b/src/ia32/lithium-gap-resolver-ia32.h
@@ -30,7 +30,7 @@
#include "v8.h"
-#include "lithium-allocator.h"
+#include "lithium.h"
namespace v8 {
namespace internal {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index cca07c8..0ad3819 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -68,11 +69,33 @@
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs must use a fixed register.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -162,6 +185,12 @@
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -262,7 +291,15 @@
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
}
@@ -385,7 +422,7 @@
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -401,7 +438,6 @@
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -652,16 +688,16 @@
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
+ instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
+ instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
@@ -669,7 +705,10 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -694,7 +733,7 @@
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -739,18 +778,38 @@
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
+
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->OperandAt(0)->representation().IsInteger32());
ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@@ -883,7 +942,6 @@
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -894,26 +952,19 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch() && !instr->IsGoto()) {
- // TODO(fschneider): Handle branch instructions uniformly like
- // other instructions. This requires us to generate the right
- // branch instruction already at the HIR level.
+ if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
- HBranch* branch = HBranch::cast(current);
- instr->set_hydrogen_value(branch->value());
- HBasicBlock* first = branch->FirstSuccessor();
- HBasicBlock* second = branch->SecondSuccessor();
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
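After the HBranch-to-HTest rename, a test's two successors are wired directly into the lithium control instruction as branch targets. A small sketch of that wiring, with block ids as plain ints:

    #include <cassert>

    struct ControlInstr {
      int true_block = -1;
      int false_block = -1;
      void SetBranchTargets(int t, int f) { true_block = t; false_block = f; }
    };

    int main() {
      ControlInstr instr;
      instr.SetBranchTargets(3, 7);  // FirstSuccessor / SecondSuccessor ids
      assert(instr.true_block == 3 && instr.false_block == 7);
      return 0;
    }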
@@ -959,7 +1010,7 @@
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
@@ -1037,14 +1088,17 @@
UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
+ LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
+ LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
+ LOperand* context = UseFixed(instance_of->context(), esi);
LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(
- UseFixed(instance_of->left(), InstanceofStub::left()),
- UseFixed(instance_of->right(), InstanceofStub::right()));
+ new LInstanceOfAndBranch(context, left, right);
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
@@ -1061,8 +1115,7 @@
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new LCmpMapAndBranch(value);
@@ -1080,9 +1133,10 @@
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstanceOf* result =
- new LInstanceOf(UseFixed(instr->left(), InstanceofStub::left()),
- UseFixed(instr->right(), InstanceofStub::right()));
+ LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
+ LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
+ LOperand* context = UseFixed(instr->context(), esi);
+ LInstanceOf* result = new LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1101,12 +1155,14 @@
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseRegisterAtStart(instr->length());
- LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* length = UseFixed(instr->length(), ebx);
+ LOperand* elements = UseFixed(instr->elements(), ecx);
+ LOperand* temp = FixedTemp(edx);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
- elements);
+ elements,
+ temp);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1118,13 +1174,26 @@
}
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1165,21 +1234,27 @@
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- return MarkAsCall(DefineFixed(new LCallKeyed(key), eax), instr);
+ argument_count_ -= instr->argument_count();
+ LCallKeyed* result = new LCallKeyed(context, key);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallNamed, eax), instr);
+ LCallNamed* result = new LCallNamed(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal, eax), instr);
+ LCallGlobal* result = new LCallGlobal(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1190,16 +1265,19 @@
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(constructor);
+ LCallNew* result = new LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, eax), instr);
+ LCallFunction* result = new LCallFunction(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1255,9 +1333,9 @@
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
+ LOperand* dividend = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new LDivI(value, divisor, temp);
+ LDivI* result = new LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1321,8 +1399,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1472,6 +1550,12 @@
}
+LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LPixelArrayLength(array));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
@@ -1485,6 +1569,13 @@
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), eax);
return MarkAsCall(new LThrow(value), instr);
@@ -1600,13 +1691,15 @@
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- int32_t value = instr->Integer32Value();
- return DefineAsRegister(new LConstantI(value));
+ return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
- return DefineAsRegister(new LConstantD(value));
+ LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
+ ? TempRegister()
+ : NULL;
+ return DefineAsRegister(new LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT(instr->handle()));
+ return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
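The temp register for LConstantD is requested only when the constant's bit pattern is nonzero: +0.0 is all-zero bits and can presumably be materialized with a register-clearing xor, needing no scratch register. A standalone sketch of that decision, with memcpy standing in for V8's BitCast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors BitCast<uint64_t, double>(value) from the code above.
    uint64_t BitCast(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }

    bool NeedsTempRegister(double value) { return BitCast(value) != 0; }

    int main() {
      assert(!NeedsTempRegister(0.0));   // all-zero bits: no temp needed
      assert(NeedsTempRegister(-0.0));   // sign bit set, so a temp is used
      assert(NeedsTempRegister(1.5));
      return 0;
    }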
@@ -1623,12 +1716,31 @@
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+ LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
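DoStoreGlobal now attaches a deoptimization environment only when the store must check for the hole value, since only that path can bail out. A toy model of the conditional:

    #include <cassert>

    struct LInstr { bool has_env = false; };

    LInstr* AssignEnvironment(LInstr* i) { i->has_env = true; return i; }

    LInstr* DoStoreGlobal(bool check_hole_value, LInstr* result) {
      return check_hole_value ? AssignEnvironment(result) : result;
    }

    int main() {
      LInstr a, b;
      assert(DoStoreGlobal(true, &a)->has_env);    // can deoptimize
      assert(!DoStoreGlobal(false, &b)->has_env);  // plain store
      return 0;
    }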
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- return DefineAsRegister(new LLoadContextSlot);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ LOperand* temp;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ temp = TempRegister();
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ temp = NULL;
+ }
+ return new LStoreContextSlot(context, value, temp);
}
@@ -1640,8 +1752,9 @@
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), eax);
- LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1656,7 +1769,14 @@
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineSameAsFirst(new LLoadElements(input));
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
+ HLoadPixelArrayExternalPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
@@ -1671,11 +1791,25 @@
}
+LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
+ HLoadPixelArrayElement* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer =
+ UseRegisterAtStart(instr->external_pointer());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadPixelArrayElement* result =
+ new LLoadPixelArrayElement(external_pointer, key);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
- LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1700,15 +1834,18 @@
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), edx);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), ecx);
- LOperand* val = UseFixed(instr->value(), eax);
+ LOperand* value = UseFixed(instr->value(), eax);
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+ LStoreKeyedGeneric* result =
+ new LStoreKeyedGeneric(context, object, key, value);
+ return MarkAsCall(result, instr);
}
@@ -1726,28 +1863,45 @@
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister() : NULL;
+ ? TempRegister()
+ : NULL;
return new LStoreNamedField(obj, val, temp);
}
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), edx);
- LOperand* val = UseFixed(instr->value(), eax);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* value = UseFixed(instr->value(), eax);
- LStoreNamedGeneric* result = new LStoreNamedGeneric(obj, val);
+ LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
}
@@ -1788,8 +1942,10 @@
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallStub, eax), instr);
+ LCallStub* result = new LCallStub(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1819,6 +1975,12 @@
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
+
+LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
+ return DefineAsRegister(new LIsConstructCall);
+}
+
+
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
@@ -1838,10 +2000,11 @@
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- instructions_pending_deoptimization_environment_->
+ instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
return result;
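The tightened condition asserts that whenever a deoptimization environment is pending, the very next simulate carries the matching AST id, instead of silently skipping a mismatch. A small model of that bookkeeping (the names are simplified stand-ins for the builder's members):

    #include <cassert>

    const int kNoNumber = -1;  // mirrors AstNode::kNoNumber

    struct Builder {
      int pending_ast_id = kNoNumber;

      void MarkCall(int ast_id) { pending_ast_id = ast_id; }

      // Returns true when a lazy bailout should be emitted.
      bool Simulate(int ast_id) {
        if (pending_ast_id != kNoNumber) {
          assert(pending_ast_id == ast_id);  // the new ASSERT
          pending_ast_id = kNoNumber;        // clear the pending state
          return true;
        }
        return false;
      }
    };

    int main() {
      Builder b;
      b.MarkCall(42);
      assert(b.Simulate(42));   // lazy bailout emitted
      assert(!b.Simulate(43));  // nothing pending afterwards
      return 0;
    }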
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 67f8751..f1b9ffc 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -39,119 +39,8 @@
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
- V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@@ -185,6 +74,8 @@
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
@@ -195,16 +86,21 @@
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(FixedArrayLength) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
@@ -215,23 +111,21 @@
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
+ V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
+ V(LoadPixelArrayElement) \
+ V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -239,7 +133,9 @@
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
+ V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -248,11 +144,14 @@
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
@@ -287,7 +186,10 @@
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -304,15 +206,14 @@
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -327,41 +228,73 @@
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
- for (int i = 0; i < N; i++) elems_[i] = NULL;
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
- int length() { return N; }
- T& operator[](int i) {
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
- T elems_[N];
+ ElementType elems_[NumElements];
};
-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
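The new operator[] on the zero-length specialization lets generic code index operands unconditionally while still trapping if the empty case is ever reached. A compilable sketch of the same shape, with assert(false) standing in for UNREACHABLE():

    #include <cassert>

    template <typename ElementType, int NumElements>
    class Container {
     public:
      int length() { return NumElements; }
      ElementType& operator[](int i) {
        assert(i < length());
        return elems_[i];
      }
     private:
      ElementType elems_[NumElements];
    };

    template <typename ElementType>
    class Container<ElementType, 0> {
     public:
      int length() { return 0; }
      ElementType& operator[](int i) {
        (void)i;
        assert(false);  // stands in for UNREACHABLE()
        static ElementType dummy = ElementType();
        return dummy;
      }
    };

    int main() {
      Container<int, 2> two;
      two[0] = 7;
      assert(two.length() == 2 && two[0] == 7);
      Container<int, 0> zero;
      assert(zero.length() == 0);  // indexing zero[] would abort
      return 0;
    }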
-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
@@ -495,11 +428,17 @@
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
+ LOperand* context() { return inputs_[0]; }
+
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
@@ -512,7 +451,7 @@
};
-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
@@ -531,16 +470,18 @@
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements) {
+ LOperand* elements,
+ LOperand* temp) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -570,7 +511,7 @@
};
-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -627,7 +568,7 @@
};
-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -644,7 +585,7 @@
};
-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -663,7 +604,7 @@
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUnaryMathOperation(LOperand* value) {
inputs_[0] = value;
@@ -677,7 +618,7 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -688,7 +629,7 @@
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -700,7 +641,7 @@
};
-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
@@ -754,7 +695,7 @@
};
-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
@@ -765,7 +706,7 @@
};
-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -777,7 +718,7 @@
};
-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -803,7 +744,7 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -814,7 +755,7 @@
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -826,6 +767,24 @@
};
+class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
public:
LClassOfTest(LOperand* value, LOperand* temp) {
@@ -856,7 +815,7 @@
};
-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -870,7 +829,7 @@
};
-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -884,25 +843,31 @@
};
-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+
+ LOperand* context() { return inputs_[0]; }
};
-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+ LOperand* context() { return inputs_[0]; }
};
@@ -935,7 +900,7 @@
};
-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -952,7 +917,7 @@
};
-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -972,7 +937,7 @@
};
-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -984,48 +949,38 @@
};
-class LConstant: public LTemplateInstruction<1, 0, 0> {
- DECLARE_INSTRUCTION(Constant)
-};
-
-
-class LConstantI: public LConstant {
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantI(int32_t value) : value_(value) { }
- int32_t value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- int32_t value_;
+ int32_t value() const { return hydrogen()->Integer32Value(); }
};
-class LConstantD: public LConstant {
+class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
- explicit LConstantD(double value) : value_(value) { }
- double value() const { return value_; }
+ explicit LConstantD(LOperand* temp) {
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- double value_;
+ double value() const { return hydrogen()->DoubleValue(); }
};
-class LConstantT: public LConstant {
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantT(Handle<Object> value) : value_(value) { }
- Handle<Object> value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- Handle<Object> value_;
+ Handle<Object> value() const { return hydrogen()->handle(); }
};
-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1038,28 +993,28 @@
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
};
-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1070,7 +1025,18 @@
};
-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LPixelArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+};
+
+
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1093,7 +1059,7 @@
};
-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1103,7 +1069,7 @@
};
-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
@@ -1113,7 +1079,7 @@
};
-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1125,7 +1091,7 @@
};
-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1137,7 +1103,7 @@
};
-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1155,7 +1121,7 @@
};
-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1173,7 +1139,7 @@
};
-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
@@ -1183,7 +1149,7 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1194,16 +1160,18 @@
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1222,7 +1190,7 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
@@ -1232,7 +1200,18 @@
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+ "load-pixel-array-external-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1247,17 +1226,35 @@
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
inputs_[1] = key;
}
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+ "load-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
};
@@ -1268,7 +1265,7 @@
};
-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobal(LOperand* value) {
inputs_[0] = value;
@@ -1279,19 +1276,43 @@
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1301,15 +1322,45 @@
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LContext: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
@@ -1325,49 +1376,68 @@
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
virtual void PrintDataTo(StringStream* stream);
+ LOperand* context() { return inputs_[0]; }
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallFunction(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+ LOperand* context() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 2; }
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
virtual void PrintDataTo(StringStream* stream);
+ LOperand* context() { return inputs_[0]; }
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1385,10 +1455,11 @@
};
-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
@@ -1396,6 +1467,8 @@
virtual void PrintDataTo(StringStream* stream);
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1410,7 +1483,7 @@
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1420,7 +1493,7 @@
};
-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1432,7 +1505,7 @@
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
+ LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -1471,7 +1544,7 @@
};
-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1481,7 +1554,7 @@
};
-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1491,7 +1564,7 @@
};
-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1542,13 +1615,21 @@
};
-class LStoreNamedGeneric: public LStoreNamed {
+class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1581,16 +1662,56 @@
};
-class LStoreKeyedGeneric: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* object,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
};
-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
@@ -1613,7 +1734,7 @@
};
-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
@@ -1638,7 +1759,7 @@
};
-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
@@ -1664,10 +1785,16 @@
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LObjectLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+
+ LOperand* context() { return inputs_[0]; }
};
@@ -1687,7 +1814,7 @@
};
-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -1697,7 +1824,7 @@
};
-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
@@ -1712,7 +1839,7 @@
};
-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1727,7 +1854,7 @@
};
-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1780,7 +1907,7 @@
pointer_maps_(8),
inlined_closures_(1) { }
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
@@ -1846,7 +1973,7 @@
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
- instructions_pending_deoptimization_environment_(NULL),
+ instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
@@ -1980,7 +2107,7 @@
int argument_count_;
LAllocator* allocator_;
int position_;
- LInstruction* instructions_pending_deoptimization_environment_;
+ LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 10c942a..cd612b5 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,11 +78,6 @@
int offset,
Register value,
Register scratch) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are esi.
- ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
NearLabel done;
@@ -129,11 +124,6 @@
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are esi.
- ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@@ -339,7 +329,7 @@
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
sub(Operand(esp), Immediate(space));
- int offset = -2 * kPointerSize;
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@@ -382,7 +372,7 @@
// Optionally restore all XMM registers.
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
- int offset = -2 * kPointerSize;
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
@@ -604,11 +594,11 @@
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
- if (top_reg.is(result)) {
- add(Operand(top_reg), Immediate(object_size));
- } else {
- lea(top_reg, Operand(result, object_size));
+ if (!top_reg.is(result)) {
+ mov(top_reg, result);
}
+ add(Operand(top_reg), Immediate(object_size));
+ j(carry, gc_required, not_taken);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
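The new j(carry, gc_required) guards the 32-bit wraparound case: the allocation top plus the requested size can overflow the address space, which the add reports via the carry flag. The equivalent check in portable C++:

    #include <cassert>
    #include <cstdint>

    // True exactly when a 32-bit "add top, size" would set the carry flag.
    bool AddWouldCarry(uint32_t top, uint32_t size) {
      return top > UINT32_MAX - size;
    }

    int main() {
      assert(!AddWouldCarry(0x10000000u, 0x100u));  // normal bump allocation
      assert(AddWouldCarry(0xFFFFFFF0u, 0x100u));   // wraps: go to gc_required
      return 0;
    }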
@@ -657,7 +647,12 @@
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
- lea(result_end, Operand(result, element_count, element_size, header_size));
+
+ // We assume that element_count*element_size + header_size does not
+ // overflow.
+ lea(result_end, Operand(element_count, element_size, header_size));
+ add(result_end, Operand(result));
+ j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -702,6 +697,7 @@
mov(result_end, object_size);
}
add(result_end, Operand(result));
+ j(carry, gc_required, not_taken);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
@@ -1288,7 +1284,7 @@
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address();
cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
+ Immediate(Factory::the_hole_value()));
j(not_equal, &promote_scheduled_exception, not_taken);
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
@@ -1523,11 +1519,21 @@
mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
}
- // The context may be an intermediate context, not a function context.
- mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ mov(dst, esi);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(equal, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
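The Check added here relies on a function context being stored in its own FCONTEXT slot; a 'with' context points elsewhere, so the comparison catches a disagreement between the static scope chain and the runtime chain. A toy version of the invariant:

    #include <cassert>

    struct Context {
      Context* fcontext;  // stands in for the Context::FCONTEXT_INDEX slot
    };

    bool IsFunctionContext(const Context* c) { return c->fcontext == c; }

    int main() {
      Context fn{nullptr};
      fn.fcontext = &fn;   // a function context is its own FCONTEXT
      Context with{&fn};   // a 'with' context points at the enclosing one
      assert(IsFunctionContext(&fn));
      assert(!IsFunctionContext(&with));  // this is what the Check() catches
      return 0;
    }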
@@ -1571,6 +1577,18 @@
}
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+ if (is_uint16(bytes_dropped)) {
+ ret(bytes_dropped);
+ } else {
+ pop(scratch);
+ add(Operand(esp), Immediate(bytes_dropped));
+ push(scratch);
+ ret(0);
+ }
+}
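The fallback path exists because the x86 ret instruction encodes its byte count as a 16-bit immediate; anything larger has to pop the return address, adjust esp manually, push the address back, and ret 0. The boundary, worked out for ia32's 4-byte stack slots:

    #include <cassert>

    const int kPointerSize = 4;  // ia32
    bool is_uint16(int x) { return x >= 0 && x <= 0xFFFF; }

    int main() {
      assert(is_uint16(16383 * kPointerSize));   // 65532: fits in "ret imm16"
      assert(!is_uint16(16384 * kPointerSize));  // 65536: needs the scratch path
      return 0;
    }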
+
+
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(Operand(esp), Immediate(stack_elements * kPointerSize));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6f180c6..09584f7 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,17 @@
j(not_carry, is_smi);
}
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, not_taken);
+ }
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ test(value, Immediate(kSmiTagMask));
+ j(not_zero, not_smi_label, not_taken);
+ }
+
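Both helpers test only the low tag bit: on ia32 a smi is a 31-bit integer shifted left by one, so a smi's tag bit is always zero. A self-contained model of that encoding (the constant mirrors V8's kSmiTagMask with kSmiTag == 0):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    intptr_t SmiTag(int32_t value) { return static_cast<intptr_t>(value) << 1; }
    int32_t SmiUntag(intptr_t tagged) { return static_cast<int32_t>(tagged >> 1); }
    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

    int main() {
      intptr_t smi = SmiTag(42);
      assert(IsSmi(smi));       // test(value, kSmiTagMask); j(zero, ...)
      assert(SmiUntag(smi) == 42);
      assert(!IsSmi(smi | 1));  // heap pointers carry a set tag bit
      return 0;
    }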
// Assumes input is a heap object.
void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
@@ -539,6 +550,10 @@
void Ret();
+ // Return and drop arguments from the stack, where the number of bytes
+ // dropped may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 45d63c5..f96ef5c 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -327,28 +327,32 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss) {
+ Label* miss,
+ bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss,
+ support_wrappers ? &check_wrapper : miss);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss, not_taken);
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss, not_taken);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
+ }
}
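With support_wrappers false, the wrapper-check label collapses into the miss label, so a JSValue wrapping a string no longer takes the unwrap path. A sketch of the resulting behavior:

    #include <cassert>

    enum Kind { kString, kJSValueWrappingString, kOther };

    // Returns true when the stub can produce a length, false for a miss.
    bool LoadStringLength(Kind kind, bool support_wrappers) {
      if (kind == kString) return true;
      if (kind == kJSValueWrappingString && support_wrappers) return true;
      return false;
    }

    int main() {
      assert(LoadStringLength(kJSValueWrappingString, true));
      assert(!LoadStringLength(kJSValueWrappingString, false));  // now a miss
      assert(!LoadStringLength(kOther, true));
      return 0;
    }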
@@ -451,10 +455,9 @@
// Generates call to API function.
-static bool GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- Failure** failure) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : object passing the type check
@@ -516,13 +519,8 @@
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result =
- masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
- return true;
+ return masm->TryCallApiFunctionAndReturn(&fun,
+ argc + kFastApiCallArguments + 1);
}
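This file's recurring refactor replaces the bool-plus-Failure** out-parameter convention with a single MaybeObject*-style return that is either a success value or a failure to propagate. A simplified model of the pattern (the types below are stand-ins, not the real V8 ones):

    #include <cassert>

    struct MaybeResult {
      bool is_failure;
      int value;  // payload when is_failure is false
    };

    MaybeResult Succeed(int v) { return MaybeResult{false, v}; }
    MaybeResult Fail() { return MaybeResult{true, 0}; }

    // Propagates failures the same way the rewritten compilers do with
    // "if (result->IsFailure()) return result;".
    MaybeResult Compile(bool inner_ok) {
      MaybeResult inner = inner_ok ? Succeed(1) : Fail();
      if (inner.is_failure) return inner;
      return Succeed(inner.value + 1);
    }

    int main() {
      assert(!Compile(true).is_failure && Compile(true).value == 2);
      assert(Compile(false).is_failure);
      return 0;
    }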
@@ -535,17 +533,16 @@
arguments_(arguments),
name_(name) {}
- bool Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss,
- Failure** failure) {
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -566,8 +563,7 @@
lookup,
name,
optimization,
- miss,
- failure);
+ miss);
} else {
CompileRegular(masm,
object,
@@ -578,23 +574,22 @@
name,
holder,
miss);
- return true;
+ return Heap::undefined_value(); // Success.
}
}
private:
- bool CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label,
- Failure** failure) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -656,11 +651,9 @@
// Invoke function.
if (can_do_fast_api_call) {
- bool success = GenerateFastApiCall(masm, optimization,
- arguments_.immediate(), failure);
- if (!success) {
- return false;
- }
+ MaybeObject* result =
+ GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -679,7 +672,7 @@
FreeSpaceForFastApiCall(masm, scratch1);
}
- return true;
+ return Heap::undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -1057,17 +1050,16 @@
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@@ -1122,13 +1114,7 @@
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
-
- return true;
+ return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
}
@@ -2280,17 +2266,14 @@
}
if (depth != kInvalidProtoDepth) {
- Failure* failure;
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2335,21 +2318,17 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), ecx);
- Failure* failure;
- bool success = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- edx,
- ebx,
- edi,
- eax,
- &miss,
- &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ edx,
+ ebx,
+ edi,
+ eax,
+ &miss);
+ if (result->IsFailure()) return result;
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2603,14 +2582,24 @@
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
- // Store the value in the cell.
+
+ // Compute the cell operand to use.
+ Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
if (Serializer::enabled()) {
__ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
- } else {
- __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+ cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
}
+ // Check that the value in the cell is not the hole. If it is, the
+ // global property may have been deleted, and reintroducing it requires
+ // updating the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ cmp(cell_operand, Factory::the_hole_value());
+ __ j(equal, &miss);
+
+ // Store the value in the cell.
+ __ mov(cell_operand, eax);
+
// Return the value (register eax).
__ IncrementCounter(&Counters::named_store_global_inline, 1);
__ ret(0);
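The hole check matters because deleting a global property leaves the hole in its JSGlobalPropertyCell, and reintroducing the property must update the property details in the global object's dictionary, which only the runtime can do. A minimal host-side model of the fast path (types and names are illustrative, not the V8 API):

    struct Object {};
    Object the_hole;  // sentinel marking a deleted global property

    // Returns false to model "jump to the miss label and enter the runtime".
    bool FastStoreGlobal(Object** cell, Object* value) {
      if (*cell == &the_hole) return false;  // deleted global: bail out
      *cell = value;                         // safe in-place cell update
      return true;
    }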
@@ -2799,12 +2788,11 @@
// -----------------------------------
Label miss;
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+ edi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2968,12 +2956,11 @@
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
+ ecx, edi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -3089,7 +3076,7 @@
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
+ GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3168,6 +3155,37 @@
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ edx,
+ eax,
+ ecx,
+ ebx,
+ eax,
+ &miss,
+ &miss,
+ &miss);
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
@@ -3306,6 +3324,364 @@
}
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the map of the receiver.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &slow, not_taken);
+
+ __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow, not_taken);
+
+ // eax: key, known to be a smi.
+ // edx: receiver, known to be a JSObject.
+ // ebx: elements object, known to be an external array.
+ // Check that the index is in range.
+ __ mov(ecx, eax);
+ __ SmiUntag(ecx); // Untag the index.
+ __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+ // ebx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(ecx, Operand(ebx, ecx, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ fld_s(Operand(ebx, ecx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // ecx: value
+ // For floating-point array type:
+ // FP(0): value
+
+ if (array_type == kExternalIntArray ||
+ array_type == kExternalUnsignedIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ if (array_type == kExternalIntArray) {
+ __ cmp(ecx, 0xC0000000);
+ __ j(sign, &box_int);
+ } else {
+ ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ __ test(ecx, Immediate(0xC0000000));
+ __ j(not_zero, &box_int);
+ }
+
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ if (array_type == kExternalIntArray) {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ } else {
+ ASSERT(array_type == kExternalUnsignedIntArray);
+ // Need to zero-extend the value.
+ // There's no fild variant for unsigned values, so zero-extend
+ // to a 64-bit int manually.
+ __ push(Immediate(0));
+ __ push(ecx);
+ __ fild_d(Operand(esp, 0));
+ __ pop(ecx);
+ __ pop(ecx);
+ }
+ // FP(0): value
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+ // Set the value.
+ __ mov(eax, ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+ // Set the value.
+ __ mov(eax, ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else {
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
+ __ ret(0);
+ }
+
+ // If we fail allocation of the HeapNumber, we still have a value on
+ // top of the FPU stack. Remove it.
+ __ bind(&failed_allocation);
+ __ ffree();
+ __ fincstp();
+ // Fall through to slow case.
+
+ // Slow case: Jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(eax); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Return the generated code.
+ return GetCode(flags);
+}
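The two smi range tests in the int paths above can be restated on the host side as follows, assuming the 31-bit smis used on ia32, i.e. a payload range of [-2^30, 2^30 - 1] (function names are illustrative):

    #include <cstdint>

    // cmp ecx, 0xC0000000 computes ecx - 0xC0000000, i.e. ecx + 2^30 in
    // two's complement; j(sign, &box_int) fires exactly when the value
    // lies outside the smi range.
    bool SignedFitsSmi(int32_t v) {
      return v >= -0x40000000 && v <= 0x3FFFFFFF;
    }

    // test ecx, 0xC0000000: if either of the top two bits is set, the
    // value cannot be a positive smi and must be boxed as a HeapNumber.
    bool UnsignedFitsSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }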
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // Get the instance type from the map of the receiver.
+ __ CmpInstanceType(edi, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: value
+ // edx: receiver, a JSObject
+ // ecx: key, a smi
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
+ &slow, true);
+
+ // Check that the index is in range.
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_equal, &check_heap_number);
+ // smi case
+ __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
+ __ SmiUntag(ecx);
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0); // Return the original value.
+
+ __ bind(&check_heap_number);
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &slow);
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
+ // ebx: untagged index
+ // edi: base pointer of external storage
+ if (array_type == kExternalFloatArray) {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // For the moment we make the slow call to the runtime on
+ // processors that don't support SSE2. The code in IntegerConvert
+ // (code-stubs-ia32.cc) is roughly what is needed here, though the
+ // conversion failure case does not need to be handled.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ if (array_type != kExternalIntArray &&
+ array_type != kExternalUnsignedIntArray) {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
+ // ecx: untagged integer value
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatures::Scope scope(SSE3);
+ // fisttp stores values as signed integers. To represent the
+ // entire range of int and unsigned int arrays, store as a
+ // 64-bit int and discard the high 32 bits.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ fisttp_d(Operand(esp, 0));
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ } else {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ // We can easily implement the correct rounding behavior for the
+ // range [0, 2^31-1]. For the time being, to keep this code simple,
+ // make the slow runtime call for values outside this range.
+ // Note: we could do better for signed int arrays.
+ __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ // We will need the key if we have to make the slow runtime call.
+ __ push(ecx);
+ __ LoadPowerOf2(xmm1, ecx, 31);
+ __ pop(ecx);
+ __ ucomisd(xmm1, xmm0);
+ __ j(above_equal, &slow);
+ __ cvttsd2si(ecx, Operand(xmm0));
+ }
+ // ecx: untagged integer value
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
+ }
+ __ ret(0); // Return original value.
+ }
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
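An approximate host-side model of the truncating double-to-int store, stated as an assumption about the intended net behavior rather than a line-for-line translation: the x86 truncating conversions (cvttsd2si, and fisttp on SSE3) produce an "integer indefinite" pattern for NaN, the infinities and out-of-range inputs, and masking that pattern down to the narrower array widths yields the zero that the WebGL comment above asks for.

    #include <cmath>
    #include <cstdint>

    uint32_t TruncatingConvert(double d) {
      double t = std::trunc(d);  // round toward zero, as the hardware does
      // NaN fails both comparisons, so it also takes the indefinite path.
      if (!(t >= -2147483648.0 && t <= 2147483647.0)) return 0x80000000u;
      return static_cast<uint32_t>(static_cast<int32_t>(t));
    }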
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 11e1aaf..1cc91a9 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1033,23 +1033,31 @@
}
-Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+Result VirtualFrame::CallStoreIC(Handle<String> name,
+ bool is_contextual,
+ StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in ecx, value in eax, and receiver in edx.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+
Result value = Pop();
+ RelocInfo::Mode mode;
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(eax);
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
+ mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, eax, edx);
+ mode = RelocInfo::CODE_TARGET;
}
__ mov(ecx, name);
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+ return RawCallCodeObject(ic, mode);
}
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index b9faa46..729469f 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -365,7 +365,8 @@
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual);
+ Result CallStoreIC(Handle<String> name, bool is_contextual,
+ StrictModeFlag strict_mode);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.
diff --git a/src/ic.cc b/src/ic.cc
index afae323..62ab0f2 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -342,7 +342,10 @@
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
- SetTargetAtAddress(address, initialize_stub());
+ SetTargetAtAddress(address,
+ target->extra_ic_state() == kStoreICStrict
+ ? initialize_stub_strict()
+ : initialize_stub());
}
@@ -367,55 +370,6 @@
}
-Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
- switch (elements_kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
- switch (elements_kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
@@ -871,6 +825,9 @@
}
if (FLAG_use_ic) {
+ Code* non_monomorphic_stub =
+ (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
+
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
@@ -878,22 +835,27 @@
if ((object->IsString() || object->IsStringWrapper()) &&
name->Equals(Heap::length_symbol())) {
HandleScope scope;
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
+ if (state == PREMONOMORPHIC) {
+ if (object->IsString()) {
+ Map* map = HeapObject::cast(*object)->map();
+ const int offset = String::kLengthOffset;
+ PatchInlinedLoad(address(), map, offset);
+ set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
+ } else {
+ set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+ }
+ } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
+ set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+ } else {
+ set_target(non_monomorphic_stub);
+ }
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
object = Handle<Object>(Handle<JSValue>::cast(object)->value());
}
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- Map* map = HeapObject::cast(*object)->map();
- if (object->IsString()) {
- const int offset = String::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
- }
-
- Code* target = NULL;
- target = Builtins::builtin(Builtins::LoadIC_StringLength);
- set_target(target);
return Smi::FromInt(String::cast(*object)->length());
}
@@ -902,12 +864,14 @@
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
- Map* map = HeapObject::cast(*object)->map();
- const int offset = JSArray::kLengthOffset;
- PatchInlinedLoad(address(), map, offset);
-
- Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
- set_target(target);
+ if (state == PREMONOMORPHIC) {
+ Map* map = HeapObject::cast(*object)->map();
+ const int offset = JSArray::kLengthOffset;
+ PatchInlinedLoad(address(), map, offset);
+ set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
+ } else {
+ set_target(non_monomorphic_stub);
+ }
return JSArray::cast(*object)->length();
}
@@ -917,8 +881,11 @@
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
- Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
- set_target(target);
+ if (state == PREMONOMORPHIC) {
+ set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
+ } else {
+ set_target(non_monomorphic_stub);
+ }
return Accessors::FunctionGetPrototype(*object, 0);
}
}
@@ -1141,6 +1108,8 @@
}
if (FLAG_use_ic) {
+ // TODO(1073): don't ignore the current stub state.
+
// Use specialized code for getting the length of strings.
if (object->IsString() && name->Equals(Heap::length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
@@ -1238,20 +1207,31 @@
if (use_ic) {
Code* stub = generic_stub();
- if (object->IsString() && key->IsNumber()) {
- stub = string_stub();
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- stub = external_array_stub(receiver->GetElementsKind());
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (state == UNINITIALIZED &&
- key->IsSmi() &&
- receiver->map()->has_fast_elements()) {
- MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
- stub =
- probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
+ if (state == UNINITIALIZED) {
+ if (object->IsString() && key->IsNumber()) {
+ stub = string_stub();
+ } else if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->HasExternalArrayElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
+ false);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
+ } else if (receiver->HasPixelElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadPixelArray(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (key->IsSmi() &&
+ receiver->map()->has_fast_elements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadSpecialized(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ }
}
}
if (stub != NULL) set_target(stub);
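The probe-or-keep-generic pattern above is worth isolating: stub compilation may fail on allocation, and in that case the IC quietly keeps its current target instead of propagating the failure. Extracted from the hunk (all names as in the diff):

    MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
    Code* stub = probe->IsFailure()
        ? NULL
        : Code::cast(probe->ToObjectUnchecked());
    if (stub != NULL) set_target(stub);  // on failure, keep the old stub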
@@ -1391,6 +1371,7 @@
MaybeObject* StoreIC::Store(State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
@@ -1420,8 +1401,10 @@
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
- Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
- set_target(target);
+ Builtins::Name target = (extra_ic_state == kStoreICStrict)
+ ? Builtins::StoreIC_ArrayLength_Strict
+ : Builtins::StoreIC_ArrayLength;
+ set_target(Builtins::builtin(target));
return receiver->SetProperty(*name, *value, NONE);
}
@@ -1479,15 +1462,23 @@
// If no inlined store ic was patched, generate a stub for this
// store.
- UpdateCaches(&lookup, state, receiver, name, value);
+ UpdateCaches(&lookup, state, extra_ic_state, receiver, name, value);
+ } else {
+ // Strict mode doesn't allow setting a non-existent global property.
+ if (extra_ic_state == kStoreICStrict && IsContextual(object)) {
+ return ReferenceError("not_defined", name);
+ }
}
}
if (receiver->IsJSGlobalProxy()) {
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
- if (target() != global_proxy_stub()) {
- set_target(global_proxy_stub());
+ Code* stub = (extra_ic_state == kStoreICStrict)
+ ? global_proxy_stub_strict()
+ : global_proxy_stub();
+ if (target() != stub) {
+ set_target(stub);
#ifdef DEBUG
TraceIC("StoreIC", name, state, target());
#endif
@@ -1501,6 +1492,7 @@
void StoreIC::UpdateCaches(LookupResult* lookup,
State state,
+ Code::ExtraICState extra_ic_state,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
@@ -1521,8 +1513,8 @@
Object* code = NULL;
switch (type) {
case FIELD: {
- maybe_code = StubCache::ComputeStoreField(*name, *receiver,
- lookup->GetFieldIndex());
+ maybe_code = StubCache::ComputeStoreField(
+ *name, *receiver, lookup->GetFieldIndex(), NULL, extra_ic_state);
break;
}
case MAP_TRANSITION: {
@@ -1531,8 +1523,8 @@
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
- maybe_code = StubCache::ComputeStoreField(*name, *receiver,
- index, *transition);
+ maybe_code = StubCache::ComputeStoreField(
+ *name, *receiver, index, *transition, extra_ic_state);
break;
}
case NORMAL: {
@@ -1543,10 +1535,11 @@
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- maybe_code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+ maybe_code = StubCache::ComputeStoreGlobal(
+ *name, *global, cell, extra_ic_state);
} else {
if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeStoreNormal();
+ maybe_code = StubCache::ComputeStoreNormal(extra_ic_state);
}
break;
}
@@ -1554,12 +1547,14 @@
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return;
- maybe_code = StubCache::ComputeStoreCallback(*name, *receiver, callback);
+ maybe_code = StubCache::ComputeStoreCallback(
+ *name, *receiver, callback, extra_ic_state);
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- maybe_code = StubCache::ComputeStoreInterceptor(*name, *receiver);
+ maybe_code = StubCache::ComputeStoreInterceptor(
+ *name, *receiver, extra_ic_state);
break;
}
default:
@@ -1575,7 +1570,11 @@
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes.
- if (target() != Code::cast(code)) set_target(megamorphic_stub());
+ if (target() != Code::cast(code)) {
+ set_target(extra_ic_state == kStoreICStrict
+ ? megamorphic_stub_strict()
+ : megamorphic_stub());
+ }
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, receiver->map(), Code::cast(code));
@@ -1636,7 +1635,10 @@
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
- stub = external_array_stub(receiver->GetElementsKind());
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
+ stub =
+ probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (state == UNINITIALIZED &&
key->IsSmi() &&
receiver->map()->has_fast_elements()) {
@@ -1815,8 +1817,9 @@
ASSERT(args.length() == 3);
StoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Store(state, args.at<Object>(0), args.at<String>(1),
- args.at<Object>(2));
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ return ic.Store(state, extra_ic_state, args.at<Object>(0),
+ args.at<String>(1), args.at<Object>(2));
}
@@ -2081,6 +2084,8 @@
}
if (left_type.IsInteger32() && right_type.IsInteger32()) {
+ // Platforms with 32-bit Smis have no distinct INT32 type.
+ if (kSmiValueSize == 32) return SMI;
return INT32;
}
@@ -2124,9 +2129,11 @@
}
if (type == TRBinaryOpIC::SMI &&
previous_type == TRBinaryOpIC::SMI) {
- if (op == Token::DIV || op == Token::MUL) {
+ if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
+ // With 32-bit Smis, all overflows give heap numbers, but with
+ // 31-bit Smis, most operations overflow to int32 results.
result_type = TRBinaryOpIC::HEAP_NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
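The kSmiValueSize distinction can be checked with ordinary integer arithmetic. A small standalone demonstration (the 31-bit ia32 / 32-bit x64 smi widths are the platform assumption stated in the comments above):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 31-bit smis (ia32): the largest smi plus one still fits an int32,
      // so a SMI + SMI overflow can continue in the INT32 state.
      int64_t ia32_overflow = INT64_C(0x3FFFFFFF) + 1;
      std::printf("ia32: %lld fits int32: %d\n",
                  static_cast<long long>(ia32_overflow),
                  ia32_overflow <= INT32_MAX);
      // 32-bit smis (x64): the largest smi plus one exceeds int32 range,
      // so the only possible overflow result is a heap number.
      int64_t x64_overflow = INT64_C(0x7FFFFFFF) + 1;
      std::printf("x64:  %lld fits int32: %d\n",
                  static_cast<long long>(x64_overflow),
                  x64_overflow <= INT32_MAX);
      return 0;
    }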
@@ -2141,8 +2148,6 @@
Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
if (!code.is_null()) {
- TRBinaryOpIC ic;
- ic.patch(*code);
if (FLAG_trace_ic) {
PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
TRBinaryOpIC::GetName(previous_type),
@@ -2150,6 +2155,8 @@
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
+ TRBinaryOpIC ic;
+ ic.patch(*code);
// Activate inlined smi code.
if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
diff --git a/src/ic.h b/src/ic.h
index 9996aff..3b10d06 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -284,7 +284,8 @@
// Specialized code generator routines.
static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateStringLength(MacroAssembler* masm);
+ static void GenerateStringLength(MacroAssembler* masm,
+ bool support_wrappers);
static void GenerateFunctionPrototype(MacroAssembler* masm);
// Clear the use of the inlined version.
@@ -345,12 +346,6 @@
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
- // Generators for external array types. See objects.h.
- // These are similar to the generic IC; they optimize the case of
- // operating upon external array types but fall back to the runtime
- // for all other types.
- static void GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
@@ -386,7 +381,6 @@
static Code* string_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_String);
}
- static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static Code* indexed_interceptor_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
@@ -404,9 +398,16 @@
class StoreIC: public IC {
public:
+
+ enum StoreICStrictMode {
+ kStoreICNonStrict = kNonStrictMode,
+ kStoreICStrict = kStrictMode
+ };
+
StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
MUST_USE_RESULT MaybeObject* Store(State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value);
@@ -414,7 +415,8 @@
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ Code::ExtraICState extra_ic_state);
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateGlobalProxy(MacroAssembler* masm);
@@ -430,7 +432,9 @@
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state, Handle<JSObject> receiver,
+ State state,
+ Code::ExtraICState extra_ic_state,
+ Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
@@ -438,12 +442,21 @@
static Code* megamorphic_stub() {
return Builtins::builtin(Builtins::StoreIC_Megamorphic);
}
+ static Code* megamorphic_stub_strict() {
+ return Builtins::builtin(Builtins::StoreIC_Megamorphic_Strict);
+ }
static Code* initialize_stub() {
return Builtins::builtin(Builtins::StoreIC_Initialize);
}
+ static Code* initialize_stub_strict() {
+ return Builtins::builtin(Builtins::StoreIC_Initialize_Strict);
+ }
static Code* global_proxy_stub() {
return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
}
+ static Code* global_proxy_stub_strict() {
+ return Builtins::builtin(Builtins::StoreIC_GlobalProxy_Strict);
+ }
static void Clear(Address address, Code* target);
@@ -470,13 +483,6 @@
static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
- // Generators for external array types. See objects.h.
- // These are similar to the generic IC; they optimize the case of
- // operating upon external array types but fall back to the runtime
- // for all other types.
- static void GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
-
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
@@ -501,7 +507,6 @@
static Code* generic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
- static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);
diff --git a/src/json.js b/src/json.js
index e90d5d1..e6ada51 100644
--- a/src/json.js
+++ b/src/json.js
@@ -38,7 +38,7 @@
}
} else {
for (var p in val) {
- if (ObjectHasOwnProperty.call(val, p)) {
+ if (%_CallFunction(val, p, ObjectHasOwnProperty)) {
var newElement = Revive(val, p, reviver);
if (IS_UNDEFINED(newElement)) {
delete val[p];
@@ -101,7 +101,7 @@
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
- if (ObjectHasOwnProperty.call(replacer, i)) {
+ if (%_CallFunction(replacer, i, ObjectHasOwnProperty)) {
var p = replacer[i];
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
@@ -114,7 +114,7 @@
}
} else {
for (var p in value) {
- if (ObjectHasOwnProperty.call(value, p)) {
+ if (%_CallFunction(value, p, ObjectHasOwnProperty)) {
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
var member = %QuoteJSONString(p) + ":";
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
new file mode 100644
index 0000000..84c5bbd
--- /dev/null
+++ b/src/lithium-allocator-inl.h
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
+#define V8_LITHIUM_ALLOCATOR_INL_H_
+
+#include "lithium-allocator.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
+
+
+LInstruction* LAllocator::InstructionAt(int index) {
+ return chunk_->instructions()->at(index);
+}
+
+
+LGap* LAllocator::GapAt(int index) {
+ return chunk_->GetGapAt(index);
+}
+
+
+TempIterator::TempIterator(LInstruction* instr)
+ : instr_(instr),
+ limit_(instr->TempCount()),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+}
+
+
+bool TempIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* TempIterator::Next() {
+ ASSERT(HasNext());
+ return instr_->TempAt(current_);
+}
+
+
+int TempIterator::AdvanceToNext(int start) {
+ while (start < limit_ && instr_->TempAt(start) == NULL) start++;
+ return start;
+}
+
+
+void TempIterator::Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+ : instr_(instr),
+ limit_(instr->InputCount()),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+}
+
+
+bool InputIterator::HasNext() { return current_ < limit_; }
+
+
+LOperand* InputIterator::Next() {
+ ASSERT(HasNext());
+ return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+}
+
+
+int InputIterator::AdvanceToNext(int start) {
+ while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
+ return start;
+}
+
+
+UseIterator::UseIterator(LInstruction* instr)
+ : input_iterator_(instr), env_iterator_(instr->environment()) { }
+
+
+bool UseIterator::HasNext() {
+ return input_iterator_.HasNext() || env_iterator_.HasNext();
+}
+
+
+LOperand* UseIterator::Next() {
+ ASSERT(HasNext());
+ return input_iterator_.HasNext()
+ ? input_iterator_.Next()
+ : env_iterator_.Next();
+}
+
+
+void UseIterator::Advance() {
+ input_iterator_.HasNext()
+ ? input_iterator_.Advance()
+ : env_iterator_.Advance();
+}
+
+} } // namespace v8::internal
+
+#endif // V8_LITHIUM_ALLOCATOR_INL_H_
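For context, this is how the allocator consumes the new iterators; the pattern replaces the index-based loops over InstructionSummary (the snippets mirror MeetConstraintsBetween and the live-range construction hunks below):

    // Fixed temporaries of the first instruction.
    for (TempIterator it(first); it.HasNext(); it.Advance()) {
      LUnallocated* temp = LUnallocated::cast(it.Next());
      // ... apply the fixed-register constraint ...
    }

    // Uses of the second instruction: inputs first (constant operands are
    // skipped by InputIterator), then environment uses.
    for (UseIterator it(second); it.HasNext(); it.Advance()) {
      LOperand* use = it.Next();
      // ... record the use position ...
    }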
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 2bbc6b6..9f5f1b9 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "lithium-allocator.h"
+#include "lithium-allocator-inl.h"
#include "hydrogen.h"
#include "string-stream.h"
@@ -71,73 +71,24 @@
}
-void LOperand::PrintTo(StringStream* stream) {
- LUnallocated* unalloc = NULL;
- switch (kind()) {
- case INVALID:
- break;
- case UNALLOCATED:
- unalloc = LUnallocated::cast(this);
- stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
- case LUnallocated::NONE:
- break;
- case LUnallocated::FIXED_REGISTER: {
- const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", register_name);
- break;
- }
- case LUnallocated::FIXED_DOUBLE_REGISTER: {
- const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", double_register_name);
- break;
- }
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
- case LUnallocated::MUST_HAVE_REGISTER:
- stream->Add("(R)");
- break;
- case LUnallocated::WRITABLE_REGISTER:
- stream->Add("(WR)");
- break;
- case LUnallocated::SAME_AS_FIRST_INPUT:
- stream->Add("(1)");
- break;
- case LUnallocated::ANY:
- stream->Add("(-)");
- break;
- case LUnallocated::IGNORE:
- stream->Add("(0)");
- break;
- }
- break;
- case CONSTANT_OPERAND:
- stream->Add("[constant:%d]", index());
- break;
- case STACK_SLOT:
- stream->Add("[stack:%d]", index());
- break;
- case DOUBLE_STACK_SLOT:
- stream->Add("[double_stack:%d]", index());
- break;
- case REGISTER:
- stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
- break;
- case DOUBLE_REGISTER:
- stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
- break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
+UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
+ : operand_(operand),
+ hint_(NULL),
+ pos_(pos),
+ next_(NULL),
+ requires_reg_(false),
+ register_beneficial_(true) {
+ if (operand_ != NULL && operand_->IsUnallocated()) {
+ LUnallocated* unalloc = LUnallocated::cast(operand_);
+ requires_reg_ = unalloc->HasRegisterPolicy();
+ register_beneficial_ = !unalloc->HasAnyPolicy();
}
+ ASSERT(pos_.IsValid());
}
-int LOperand::VirtualRegister() {
- LUnallocated* unalloc = LUnallocated::cast(this);
- return unalloc->virtual_register();
+
+bool UsePosition::HasHint() const {
+ return hint_ != NULL && !hint_->IsUnallocated();
}
@@ -190,6 +141,53 @@
#endif
+LiveRange::LiveRange(int id)
+ : id_(id),
+ spilled_(false),
+ assigned_register_(kInvalidAssignment),
+ assigned_register_kind_(NONE),
+ last_interval_(NULL),
+ first_interval_(NULL),
+ first_pos_(NULL),
+ parent_(NULL),
+ next_(NULL),
+ current_interval_(NULL),
+ last_processed_use_(NULL),
+ spill_start_index_(kMaxInt) {
+ spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
+}
+
+
+void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
+ ASSERT(!HasRegisterAssigned() && !IsSpilled());
+ assigned_register_ = reg;
+ assigned_register_kind_ = register_kind;
+ ConvertOperands();
+}
+
+
+void LiveRange::MakeSpilled() {
+ ASSERT(!IsSpilled());
+ ASSERT(TopLevel()->HasAllocatedSpillOperand());
+ spilled_ = true;
+ assigned_register_ = kInvalidAssignment;
+ ConvertOperands();
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+ return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+}
+
+
+void LiveRange::SetSpillOperand(LOperand* operand) {
+ ASSERT(!operand->IsUnallocated());
+ ASSERT(spill_operand_ != NULL);
+ ASSERT(spill_operand_->IsUnallocated());
+ spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
UsePosition* use_pos = last_processed_use_;
if (use_pos == NULL) use_pos = first_pos();
@@ -534,7 +532,7 @@
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
- int block_count = graph()->blocks()->length();
+ int block_count = graph_->blocks()->length();
live_in_sets_.Initialize(block_count);
live_in_sets_.AddBlock(NULL, block_count);
}
@@ -615,7 +613,7 @@
}
if (is_tagged) {
TraceAlloc("Fixed reg is tagged at %d\n", pos);
- LInstruction* instr = chunk_->instructions()->at(pos);
+ LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
instr->pointer_map()->RecordPointer(operand);
}
@@ -670,17 +668,17 @@
}
-LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+LGap* LAllocator::GetLastGap(HBasicBlock* block) {
int last_instruction = block->last_instruction_index();
int index = chunk_->NearestGapPos(last_instruction);
- return chunk_->GetGapAt(index);
+ return GapAt(index);
}
HPhi* LAllocator::LookupPhi(LOperand* operand) const {
if (!operand->IsUnallocated()) return NULL;
int index = operand->VirtualRegister();
- HValue* instr = graph()->LookupValue(index);
+ HValue* instr = graph_->LookupValue(index);
if (instr != NULL && instr->IsPhi()) {
return HPhi::cast(instr);
}
@@ -739,7 +737,7 @@
void LAllocator::AddConstraintsGapMove(int index,
LOperand* from,
LOperand* to) {
- LGap* gap = chunk_->GetGapAt(index);
+ LGap* gap = GapAt(index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
@@ -762,24 +760,24 @@
int start = block->first_instruction_index();
int end = block->last_instruction_index();
for (int i = start; i <= end; ++i) {
- if (chunk_->IsGapAt(i)) {
- InstructionSummary* summary = NULL;
- InstructionSummary* prev_summary = NULL;
- if (i < end) summary = GetSummary(i + 1);
- if (i > start) prev_summary = GetSummary(i - 1);
- MeetConstraintsBetween(prev_summary, summary, i);
+ if (IsGapAt(i)) {
+ LInstruction* instr = NULL;
+ LInstruction* prev_instr = NULL;
+ if (i < end) instr = InstructionAt(i + 1);
+ if (i > start) prev_instr = InstructionAt(i - 1);
+ MeetConstraintsBetween(prev_instr, instr, i);
}
}
}
-void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
- InstructionSummary* second,
+void LAllocator::MeetConstraintsBetween(LInstruction* first,
+ LInstruction* second,
int gap_index) {
// Handle fixed temporaries.
if (first != NULL) {
- for (int i = 0; i < first->TempCount(); ++i) {
- LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+ for (TempIterator it(first); it.HasNext(); it.Advance()) {
+ LUnallocated* temp = LUnallocated::cast(it.Next());
if (temp->HasFixedPolicy()) {
AllocateFixed(temp, gap_index - 1, false);
}
@@ -812,7 +810,7 @@
// and splitting of live ranges do not account for it.
// Thus it should be inserted to a lifetime position corresponding to
// the instruction end.
- LGap* gap = chunk_->GetGapAt(gap_index);
+ LGap* gap = GapAt(gap_index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
move->AddMove(first_output, range->GetSpillOperand());
}
@@ -820,8 +818,8 @@
// Handle fixed input operands of second instruction.
if (second != NULL) {
- for (int i = 0; i < second->InputCount(); ++i) {
- LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+ for (UseIterator it(second); it.HasNext(); it.Advance()) {
+ LUnallocated* cur_input = LUnallocated::cast(it.Next());
if (cur_input->HasFixedPolicy()) {
LUnallocated* input_copy = cur_input->CopyUnconstrained();
bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -850,7 +848,7 @@
if (second != NULL && second->Output() != NULL) {
LUnallocated* second_output = LUnallocated::cast(second->Output());
if (second_output->HasSameAsInputPolicy()) {
- LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+ LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
int output_vreg = second_output->VirtualRegister();
int input_vreg = cur_input->VirtualRegister();
@@ -860,7 +858,7 @@
if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
int index = gap_index + 1;
- LInstruction* instr = chunk_->instructions()->at(index);
+ LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
instr->pointer_map()->RecordPointer(input_copy);
}
@@ -888,9 +886,9 @@
LifetimePosition curr_position =
LifetimePosition::FromInstructionIndex(index);
- if (chunk_->IsGapAt(index)) {
+ if (IsGapAt(index)) {
// We have a gap at this position.
- LGap* gap = chunk_->GetGapAt(index);
+ LGap* gap = GapAt(index);
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -924,17 +922,17 @@
}
}
} else {
- ASSERT(!chunk_->IsGapAt(index));
- InstructionSummary* summary = GetSummary(index);
+ ASSERT(!IsGapAt(index));
+ LInstruction* instr = InstructionAt(index);
- if (summary != NULL) {
- LOperand* output = summary->Output();
+ if (instr != NULL) {
+ LOperand* output = instr->Output();
if (output != NULL) {
if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
Define(curr_position, output, NULL);
}
- if (summary->IsCall()) {
+ if (instr->IsMarkedAsCall()) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
@@ -945,7 +943,7 @@
}
}
- if (summary->IsCall() || summary->IsSaveDoubles()) {
+ if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
@@ -956,8 +954,8 @@
}
}
- for (int i = 0; i < summary->InputCount(); ++i) {
- LOperand* input = summary->InputAt(i);
+ for (UseIterator it(instr); it.HasNext(); it.Advance()) {
+ LOperand* input = it.Next();
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -971,9 +969,9 @@
if (input->IsUnallocated()) live->Add(input->VirtualRegister());
}
- for (int i = 0; i < summary->TempCount(); ++i) {
- LOperand* temp = summary->TempAt(i);
- if (summary->IsCall()) {
+ for (TempIterator it(instr); it.HasNext(); it.Advance()) {
+ LOperand* temp = it.Next();
+ if (instr->IsMarkedAsCall()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1044,9 +1042,9 @@
void LAllocator::MeetRegisterConstraints() {
- HPhase phase("Register constraints", chunk());
+ HPhase phase("Register constraints", chunk_);
first_artificial_register_ = next_virtual_register_;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
MeetRegisterConstraints(block);
@@ -1055,10 +1053,10 @@
void LAllocator::ResolvePhis() {
- HPhase phase("Resolve phis", chunk());
+ HPhase phase("Resolve phis", chunk_);
// Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
HBasicBlock* block = blocks->at(block_id);
ResolvePhis(block);
@@ -1070,9 +1068,7 @@
HBasicBlock* block,
HBasicBlock* pred) {
LifetimePosition pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index()).
- PrevInstruction();
-
+ LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
LifetimePosition cur_start =
LifetimePosition::FromInstructionIndex(block->first_instruction_index());
LiveRange* pred_cover = NULL;
@@ -1098,7 +1094,7 @@
if (!pred_op->Equals(cur_op)) {
LGap* gap = NULL;
if (block->predecessors()->length() == 1) {
- gap = chunk_->GetGapAt(block->first_instruction_index());
+ gap = GapAt(block->first_instruction_index());
} else {
ASSERT(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred);
@@ -1111,19 +1107,19 @@
LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
int index = pos.InstructionIndex();
- if (chunk_->IsGapAt(index)) {
- LGap* gap = chunk_->GetGapAt(index);
+ if (IsGapAt(index)) {
+ LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
pos.IsInstructionStart() ? LGap::START : LGap::END);
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove(
+ return GapAt(gap_pos)->GetOrCreateParallelMove(
(gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
}
HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
- LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+ LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
return gap->block();
}
@@ -1170,7 +1166,7 @@
void LAllocator::ResolveControlFlow() {
HPhase phase("Resolve control flow", this);
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id);
if (CanEagerlyResolveControlFlow(block)) continue;
@@ -1193,7 +1189,7 @@
HPhase phase("Build live ranges", this);
InitializeLivenessAnalysis();
// Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
HBasicBlock* block = blocks->at(block_id);
BitVector* live = ComputeLiveOut(block);
@@ -1247,7 +1243,7 @@
LifetimePosition start = LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::FromInstructionIndex(
- back_edge->last_instruction_index());
+ back_edge->last_instruction_index()).NextInstruction();
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
@@ -1268,7 +1264,7 @@
found = true;
int operand_index = iterator.Current();
PrintF("Function: %s\n",
- *graph()->info()->function()->debug_name()->ToCString());
+ *graph_->info()->function()->debug_name()->ToCString());
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1473,7 +1469,7 @@
if (current->HasAllocatedSpillOperand()) {
TraceAlloc("Live range %d already has a spill operand\n", current->id());
LifetimePosition next_pos = position;
- if (chunk_->IsGapAt(next_pos.InstructionIndex())) {
+ if (IsGapAt(next_pos.InstructionIndex())) {
next_pos = next_pos.NextInstruction();
}
UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
@@ -1560,14 +1556,8 @@
}
-void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
- operand->set_virtual_register(value->id());
- current_summary()->AddInput(operand);
-}
-
-
bool LAllocator::HasTaggedValue(int virtual_register) const {
- HValue* value = graph()->LookupValue(virtual_register);
+ HValue* value = graph_->LookupValue(virtual_register);
if (value == NULL) return false;
return value->representation().IsTagged();
}
@@ -1575,7 +1565,7 @@
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
if (virtual_register < first_artificial_register_) {
- HValue* value = graph()->LookupValue(virtual_register);
+ HValue* value = graph_->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
}
@@ -1588,39 +1578,8 @@
}
-void LAllocator::MarkAsCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs can use either fixed register or have a short lifetime (be
- // used at start of the instruction).
- InstructionSummary* summary = current_summary();
-#ifdef DEBUG
- ASSERT(summary->Output() == NULL ||
- LUnallocated::cast(summary->Output())->HasFixedPolicy() ||
- !LUnallocated::cast(summary->Output())->HasRegisterPolicy());
- for (int i = 0; i < summary->InputCount(); i++) {
- ASSERT(LUnallocated::cast(summary->InputAt(i))->HasFixedPolicy() ||
- LUnallocated::cast(summary->InputAt(i))->IsUsedAtStart() ||
- !LUnallocated::cast(summary->InputAt(i))->HasRegisterPolicy());
- }
- for (int i = 0; i < summary->TempCount(); i++) {
- ASSERT(LUnallocated::cast(summary->TempAt(i))->HasFixedPolicy() ||
- !LUnallocated::cast(summary->TempAt(i))->HasRegisterPolicy());
- }
-#endif
- summary->MarkAsCall();
-}
-
-
-void LAllocator::MarkAsSaveDoubles() {
- current_summary()->MarkAsSaveDoubles();
-}
-
-
void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
operand->set_virtual_register(instr->id());
- current_summary()->SetOutput(operand);
}
@@ -1629,7 +1588,11 @@
if (!operand->HasFixedPolicy()) {
operand->set_virtual_register(next_virtual_register_++);
}
- current_summary()->AddTemp(operand);
+}
+
+
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+ operand->set_virtual_register(value->id());
}
@@ -1638,34 +1601,6 @@
}
-void LAllocator::BeginInstruction() {
- if (next_summary_ == NULL) {
- next_summary_ = new InstructionSummary();
- }
- summary_stack_.Add(next_summary_);
- next_summary_ = NULL;
-}
-
-
-void LAllocator::SummarizeInstruction(int index) {
- InstructionSummary* sum = summary_stack_.RemoveLast();
- if (summaries_.length() <= index) {
- summaries_.AddBlock(NULL, index + 1 - summaries_.length());
- }
- ASSERT(summaries_[index] == NULL);
- if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
- summaries_[index] = sum;
- } else {
- next_summary_ = sum;
- }
-}
-
-
-void LAllocator::OmitInstruction() {
- summary_stack_.RemoveLast();
-}
-
-
void LAllocator::AddToActive(LiveRange* range) {
TraceAlloc("Add live range %d to active\n", range->id());
active_live_ranges_.Add(range);
@@ -2011,21 +1946,7 @@
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
- chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
-}
-
-
-void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
- UsePosition* prev_pos = prev->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- UsePosition* next_pos = next->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- LOperand* prev_operand = prev_pos->operand();
- LOperand* next_operand = next_pos->operand();
- LGap* gap = chunk_->GetGapAt(pos);
- gap->GetOrCreateParallelMove(LGap::START)->
- AddMove(prev_operand, next_operand);
- next_pos->set_hint(prev_operand);
+ InstructionAt(pos.InstructionIndex())->IsLabel();
}
@@ -2035,6 +1956,11 @@
if (pos.Value() <= range->Start().Value()) return range;
+  // We can't properly connect live ranges if the split occurred at the
+  // end of a control instruction.
+ ASSERT(pos.IsInstructionStart() ||
+ !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
+
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
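
A note on the position arithmetic changed in the hunks above: control-flow resolution now uses the predecessor's last position directly instead of stepping back one instruction, and loop-header intervals are extended one instruction past the back edge. A simplified sketch of the encoding these helpers assume (the real LifetimePosition class is defined elsewhere in lithium-allocator.h; this paraphrase is an assumption, not the actual class):

// Assumed encoding: instruction index i expands to two positions,
// START = 2*i and END = 2*i + 1.
struct Position {
  static Position FromInstructionIndex(int index) {
    return Position{index * 2};
  }
  int InstructionIndex() const { return value / 2; }
  bool IsInstructionStart() const { return (value & 1) == 0; }
  Position NextInstruction() const {
    return FromInstructionIndex(InstructionIndex() + 1);
  }
  int value;
};
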
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 48c6563..914a5b6 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -31,6 +31,7 @@
#include "v8.h"
#include "data-flow.h"
+#include "lithium.h"
#include "zone.h"
namespace v8 {
@@ -48,6 +49,8 @@
class LArgument;
class LChunk;
+class LOperand;
+class LUnallocated;
class LConstantOperand;
class LGap;
class LParallelMove;
@@ -149,403 +152,57 @@
};
-class LOperand: public ZoneObject {
- public:
- enum Kind {
- INVALID,
- UNALLOCATED,
- CONSTANT_OPERAND,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
- };
-
- LOperand() : value_(KindField::encode(INVALID)) { }
-
- Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
- bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
- bool IsStackSlot() const { return kind() == STACK_SLOT; }
- bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
- bool IsRegister() const { return kind() == REGISTER; }
- bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
- bool IsArgument() const { return kind() == ARGUMENT; }
- bool IsUnallocated() const { return kind() == UNALLOCATED; }
- bool Equals(LOperand* other) const { return value_ == other->value_; }
- int VirtualRegister();
-
- void PrintTo(StringStream* stream);
- void ConvertTo(Kind kind, int index) {
- value_ = KindField::encode(kind);
- value_ |= index << kKindFieldWidth;
- ASSERT(this->index() == index);
- }
-
- protected:
- static const int kKindFieldWidth = 3;
- class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
-
- LOperand(Kind kind, int index) { ConvertTo(kind, index); }
-
- unsigned value_;
-};
-
-
-class LUnallocated: public LOperand {
- public:
- enum Policy {
- NONE,
- ANY,
- FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
- MUST_HAVE_REGISTER,
- WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT,
- IGNORE
- };
-
- // Lifetime of operand inside the instruction.
- enum Lifetime {
- // USED_AT_START operand is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- USED_AT_START,
-
- // USED_AT_END operand is treated as live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
- USED_AT_END
- };
-
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
- }
-
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
- }
-
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
- }
-
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 4;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 17;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
-
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
-
- static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
- static const int kMaxFixedIndices = 128;
-
- bool HasIgnorePolicy() const { return policy() == IGNORE; }
- bool HasNoPolicy() const { return policy() == NONE; }
- bool HasAnyPolicy() const {
- return policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
- }
- bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
- }
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ &= ~PolicyField::mask();
- value_ |= PolicyField::encode(policy);
- }
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
- }
-
- unsigned virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
-
- void set_virtual_register(unsigned id) {
- value_ &= ~VirtualRegisterField::mask();
- value_ |= VirtualRegisterField::encode(id);
- }
-
- LUnallocated* CopyUnconstrained() {
- LUnallocated* result = new LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
- }
-
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
- }
-
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
- }
-
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
- }
-};
-
-
-class LMoveOperands BASE_EMBEDDED {
- public:
- LMoveOperands(LOperand* source, LOperand* destination)
- : source_(source), destination_(destination) {
- }
-
- LOperand* source() const { return source_; }
- void set_source(LOperand* operand) { source_ = operand; }
-
- LOperand* destination() const { return destination_; }
- void set_destination(LOperand* operand) { destination_ = operand; }
-
- // The gap resolver marks moves as "in-progress" by clearing the
- // destination (but not the source).
- bool IsPending() const {
- return destination_ == NULL && source_ != NULL;
- }
-
- // True if this move a move into the given destination operand.
- bool Blocks(LOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
- }
-
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is unneeded.
- bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) || IsIgnored();
- }
-
- bool IsIgnored() const {
- return destination_ != NULL &&
- destination_->IsUnallocated() &&
- LUnallocated::cast(destination_)->HasIgnorePolicy();
- }
-
- // We clear both operands to indicate move that's been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
- bool IsEliminated() const {
- ASSERT(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
- }
-
- private:
- LOperand* source_;
- LOperand* destination_;
-};
-
-
-class LConstantOperand: public LOperand {
- public:
- static LConstantOperand* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LConstantOperand(index);
- }
-
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand cache[];
-
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
-};
-
-
-class LArgument: public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot: public LOperand {
- public:
- static LStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot cache[];
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot: public LOperand {
- public:
- static LDoubleStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot cache[];
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister: public LOperand {
- public:
- static LRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister cache[];
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister: public LOperand {
- public:
- static LDoubleRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister cache[];
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
-
-
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
-class InstructionSummary: public ZoneObject {
+
+class LInstruction;
+class LEnvironment;
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
public:
- InstructionSummary()
- : output_operand_(NULL),
- input_count_(0),
- operands_(4),
- is_call_(false),
- is_save_doubles_(false) {}
-
- // Output operands.
- LOperand* Output() const { return output_operand_; }
- void SetOutput(LOperand* output) {
- ASSERT(output_operand_ == NULL);
- output_operand_ = output;
- }
-
- // Input operands.
- int InputCount() const { return input_count_; }
- LOperand* InputAt(int i) const {
- ASSERT(i < input_count_);
- return operands_[i];
- }
- void AddInput(LOperand* input) {
- operands_.InsertAt(input_count_, input);
- input_count_++;
- }
-
- // Temporary operands.
- int TempCount() const { return operands_.length() - input_count_; }
- LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
- void AddTemp(LOperand* temp) { operands_.Add(temp); }
-
- void MarkAsCall() { is_call_ = true; }
- bool IsCall() const { return is_call_; }
-
- void MarkAsSaveDoubles() { is_save_doubles_ = true; }
- bool IsSaveDoubles() const { return is_save_doubles_; }
+ inline explicit TempIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
private:
- LOperand* output_operand_;
- int input_count_;
- ZoneList<LOperand*> operands_;
- bool is_call_;
- bool is_save_doubles_;
+ inline int AdvanceToNext(int start);
+ LInstruction* instr_;
+ int limit_;
+ int current_;
};
+
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+ inline explicit InputIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
+
+ private:
+ inline int AdvanceToNext(int start);
+ LInstruction* instr_;
+ int limit_;
+ int current_;
+};
+
+
+class UseIterator BASE_EMBEDDED {
+ public:
+ inline explicit UseIterator(LInstruction* instr);
+ inline bool HasNext();
+ inline LOperand* Next();
+ inline void Advance();
+
+ private:
+ InputIterator input_iterator_;
+ DeepIterator env_iterator_;
+};
+
+
// Representation of the non-empty interval [start,end[.
class UseInterval: public ZoneObject {
public:
@@ -588,27 +245,14 @@
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
- UsePosition(LifetimePosition pos, LOperand* operand)
- : operand_(operand),
- hint_(NULL),
- pos_(pos),
- next_(NULL),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != NULL && operand_->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- ASSERT(pos_.IsValid());
- }
+ UsePosition(LifetimePosition pos, LOperand* operand);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
void set_hint(LOperand* hint) { hint_ = hint; }
- bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); }
+ bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@@ -634,21 +278,7 @@
public:
static const int kInvalidAssignment = 0x7fffffff;
- explicit LiveRange(int id)
- : id_(id),
- spilled_(false),
- assigned_register_(kInvalidAssignment),
- assigned_register_kind_(NONE),
- last_interval_(NULL),
- first_interval_(NULL),
- first_pos_(NULL),
- parent_(NULL),
- next_(NULL),
- current_interval_(NULL),
- last_processed_use_(NULL),
- spill_start_index_(kMaxInt) {
- spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
- }
+ explicit LiveRange(int id);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
@@ -663,19 +293,8 @@
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, RegisterKind register_kind) {
- ASSERT(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- assigned_register_kind_ = register_kind;
- ConvertOperands();
- }
- void MakeSpilled() {
- ASSERT(!IsSpilled());
- ASSERT(TopLevel()->HasAllocatedSpillOperand());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
- ConvertOperands();
- }
+ void set_assigned_register(int reg, RegisterKind register_kind);
+ void MakeSpilled();
// Returns use position in this live range that follows both start
// and last processed use position.
@@ -724,17 +343,9 @@
return last_interval_->end();
}
- bool HasAllocatedSpillOperand() const {
- return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
- }
-
+ bool HasAllocatedSpillOperand() const;
LOperand* GetSpillOperand() const { return spill_operand_; }
- void SetSpillOperand(LOperand* operand) {
- ASSERT(!operand->IsUnallocated());
- ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsUnallocated());
- spill_operand_->ConvertTo(operand->kind(), operand->index());
- }
+ void SetSpillOperand(LOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
@@ -821,9 +432,6 @@
public:
explicit LAllocator(int first_virtual_register, HGraph* graph)
: chunk_(NULL),
- summaries_(0),
- next_summary_(NULL),
- summary_stack_(2),
live_in_sets_(0),
live_ranges_(16),
fixed_live_ranges_(8),
@@ -850,27 +458,12 @@
// Record a temporary operand.
void RecordTemporary(LUnallocated* operand);
- // Marks the current instruction as a call.
- void MarkAsCall();
-
- // Marks the current instruction as requiring saving double registers.
- void MarkAsSaveDoubles();
-
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
- // Begin a new instruction.
- void BeginInstruction();
-
- // Summarize the current instruction.
- void SummarizeInstruction(int index);
-
- // Summarize the current instruction.
- void OmitInstruction();
-
// Control max function size.
static int max_initial_value_ids();
@@ -918,8 +511,8 @@
void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
void ProcessInstructions(HBasicBlock* block, BitVector* live);
void MeetRegisterConstraints(HBasicBlock* block);
- void MeetConstraintsBetween(InstructionSummary* first,
- InstructionSummary* second,
+ void MeetConstraintsBetween(LInstruction* first,
+ LInstruction* second,
int gap_index);
void ResolvePhis(HBasicBlock* block);
@@ -984,7 +577,6 @@
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
- void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
// Helper methods for resolving control flow.
void ResolveControlFlow(LiveRange* range,
@@ -998,12 +590,6 @@
// Return the block which contains give lifetime position.
HBasicBlock* GetBlock(LifetimePosition pos);
- // Current active summary.
- InstructionSummary* current_summary() const { return summary_stack_.last(); }
-
- // Get summary for given instruction index.
- InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
-
// Helper methods for the fixed registers.
int RegisterCount() const;
static int FixedLiveRangeID(int index) { return -index - 1; }
@@ -1012,15 +598,17 @@
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
HPhi* LookupPhi(LOperand* operand) const;
- LGap* GetLastGap(HBasicBlock* block) const;
+ LGap* GetLastGap(HBasicBlock* block);
const char* RegisterName(int allocation_index);
- LChunk* chunk_;
- ZoneList<InstructionSummary*> summaries_;
- InstructionSummary* next_summary_;
+ inline bool IsGapAt(int index);
- ZoneList<InstructionSummary*> summary_stack_;
+ inline LInstruction* InstructionAt(int index);
+
+ inline LGap* GapAt(int index);
+
+ LChunk* chunk_;
// During liveness analysis keep a mapping from block id to live_in sets
// for blocks already analyzed.
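
With the InstructionSummary machinery deleted, the allocator now reads operands straight off each LInstruction through the iterator classes declared above. A hypothetical caller using only the declared HasNext/Next/Advance API (the inline bodies are defined elsewhere; VisitTemps and the loop body are illustrative, not code from this patch):

void VisitTemps(LInstruction* instr) {
  // Walk the non-null temp operands of one instruction.
  for (TempIterator it(instr); it.HasNext(); it.Advance()) {
    LUnallocated* temp = LUnallocated::cast(it.Next());
    // ... apply the register constraints recorded on |temp| ...
  }
}
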
diff --git a/src/lithium.cc b/src/lithium.cc
index d6cff25..e829f2f 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -30,6 +30,78 @@
namespace v8 {
namespace internal {
+
+void LOperand::PrintTo(StringStream* stream) {
+ LUnallocated* unalloc = NULL;
+ switch (kind()) {
+ case INVALID:
+ break;
+ case UNALLOCATED:
+ unalloc = LUnallocated::cast(this);
+ stream->Add("v%d", unalloc->virtual_register());
+ switch (unalloc->policy()) {
+ case LUnallocated::NONE:
+ break;
+ case LUnallocated::FIXED_REGISTER: {
+ const char* register_name =
+ Register::AllocationIndexToString(unalloc->fixed_index());
+ stream->Add("(=%s)", register_name);
+ break;
+ }
+ case LUnallocated::FIXED_DOUBLE_REGISTER: {
+ const char* double_register_name =
+ DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+ stream->Add("(=%s)", double_register_name);
+ break;
+ }
+ case LUnallocated::FIXED_SLOT:
+ stream->Add("(=%dS)", unalloc->fixed_index());
+ break;
+ case LUnallocated::MUST_HAVE_REGISTER:
+ stream->Add("(R)");
+ break;
+ case LUnallocated::WRITABLE_REGISTER:
+ stream->Add("(WR)");
+ break;
+ case LUnallocated::SAME_AS_FIRST_INPUT:
+ stream->Add("(1)");
+ break;
+ case LUnallocated::ANY:
+ stream->Add("(-)");
+ break;
+ case LUnallocated::IGNORE:
+ stream->Add("(0)");
+ break;
+ }
+ break;
+ case CONSTANT_OPERAND:
+ stream->Add("[constant:%d]", index());
+ break;
+ case STACK_SLOT:
+ stream->Add("[stack:%d]", index());
+ break;
+ case DOUBLE_STACK_SLOT:
+ stream->Add("[double_stack:%d]", index());
+ break;
+ case REGISTER:
+ stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+ break;
+ case DOUBLE_REGISTER:
+ stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+ break;
+ case ARGUMENT:
+ stream->Add("[arg:%d]", index());
+ break;
+ }
+}
+
+
+int LOperand::VirtualRegister() {
+ LUnallocated* unalloc = LUnallocated::cast(this);
+ return unalloc->virtual_register();
+}
+
+
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
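
Read off the switch above, representative spellings are: v7 for a plain unallocated operand, v7(R) for MUST_HAVE_REGISTER, v7(=2S) for a fixed slot, [constant:3] and [stack:3] for constants and allocated stack slots, and [eax|R] for an allocated register (the name comes from the platform's Register::AllocationIndexToString, so eax is an ia32 example).
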
diff --git a/src/lithium.h b/src/lithium.h
index 5f7c92f..a2f9df0 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -29,12 +29,360 @@
#define V8_LITHIUM_H_
#include "hydrogen.h"
-#include "lithium-allocator.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
+class LOperand: public ZoneObject {
+ public:
+ enum Kind {
+ INVALID,
+ UNALLOCATED,
+ CONSTANT_OPERAND,
+ STACK_SLOT,
+ DOUBLE_STACK_SLOT,
+ REGISTER,
+ DOUBLE_REGISTER,
+ ARGUMENT
+ };
+
+ LOperand() : value_(KindField::encode(INVALID)) { }
+
+ Kind kind() const { return KindField::decode(value_); }
+ int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
+ bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
+ bool IsStackSlot() const { return kind() == STACK_SLOT; }
+ bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
+ bool IsRegister() const { return kind() == REGISTER; }
+ bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
+ bool IsArgument() const { return kind() == ARGUMENT; }
+ bool IsUnallocated() const { return kind() == UNALLOCATED; }
+ bool Equals(LOperand* other) const { return value_ == other->value_; }
+ int VirtualRegister();
+
+ void PrintTo(StringStream* stream);
+ void ConvertTo(Kind kind, int index) {
+ value_ = KindField::encode(kind);
+ value_ |= index << kKindFieldWidth;
+ ASSERT(this->index() == index);
+ }
+
+ protected:
+ static const int kKindFieldWidth = 3;
+ class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
+
+ LOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+ unsigned value_;
+};
+
+
+class LUnallocated: public LOperand {
+ public:
+ enum Policy {
+ NONE,
+ ANY,
+ FIXED_REGISTER,
+ FIXED_DOUBLE_REGISTER,
+ FIXED_SLOT,
+ MUST_HAVE_REGISTER,
+ WRITABLE_REGISTER,
+ SAME_AS_FIRST_INPUT,
+ IGNORE
+ };
+
+ // Lifetime of operand inside the instruction.
+ enum Lifetime {
+    // USED_AT_START operand is guaranteed to be live only at the
+    // instruction start. The register allocator is free to assign the
+    // same register to some other operand used inside the instruction
+    // (e.g. a temporary or an output).
+ USED_AT_START,
+
+    // USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse
+    // its register for any other operand inside the instruction.
+ USED_AT_END
+ };
+
+ explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, 0, USED_AT_END);
+ }
+
+ LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, fixed_index, USED_AT_END);
+ }
+
+ LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, 0, lifetime);
+ }
+
+ // The superclass has a KindField. Some policies have a signed fixed
+ // index in the upper bits.
+ static const int kPolicyWidth = 4;
+ static const int kLifetimeWidth = 1;
+ static const int kVirtualRegisterWidth = 17;
+
+ static const int kPolicyShift = kKindFieldWidth;
+ static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
+ static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
+ static const int kFixedIndexShift =
+ kVirtualRegisterShift + kVirtualRegisterWidth;
+
+ class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
+
+ class LifetimeField
+ : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
+ };
+
+ class VirtualRegisterField
+ : public BitField<unsigned,
+ kVirtualRegisterShift,
+ kVirtualRegisterWidth> {
+ };
+
+ static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
+ static const int kMaxFixedIndices = 128;
+
+ bool HasIgnorePolicy() const { return policy() == IGNORE; }
+ bool HasNoPolicy() const { return policy() == NONE; }
+ bool HasAnyPolicy() const {
+ return policy() == ANY;
+ }
+ bool HasFixedPolicy() const {
+ return policy() == FIXED_REGISTER ||
+ policy() == FIXED_DOUBLE_REGISTER ||
+ policy() == FIXED_SLOT;
+ }
+ bool HasRegisterPolicy() const {
+ return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+ }
+ bool HasSameAsInputPolicy() const {
+ return policy() == SAME_AS_FIRST_INPUT;
+ }
+ Policy policy() const { return PolicyField::decode(value_); }
+ void set_policy(Policy policy) {
+ value_ &= ~PolicyField::mask();
+ value_ |= PolicyField::encode(policy);
+ }
+ int fixed_index() const {
+ return static_cast<int>(value_) >> kFixedIndexShift;
+ }
+
+ unsigned virtual_register() const {
+ return VirtualRegisterField::decode(value_);
+ }
+
+ void set_virtual_register(unsigned id) {
+ value_ &= ~VirtualRegisterField::mask();
+ value_ |= VirtualRegisterField::encode(id);
+ }
+
+ LUnallocated* CopyUnconstrained() {
+ LUnallocated* result = new LUnallocated(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
+
+ static LUnallocated* cast(LOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return reinterpret_cast<LUnallocated*>(op);
+ }
+
+ bool IsUsedAtStart() {
+ return LifetimeField::decode(value_) == USED_AT_START;
+ }
+
+ private:
+ void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
+ value_ |= PolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ value_ |= fixed_index << kFixedIndexShift;
+ ASSERT(this->fixed_index() == fixed_index);
+ }
+};
+
+
+class LMoveOperands BASE_EMBEDDED {
+ public:
+ LMoveOperands(LOperand* source, LOperand* destination)
+ : source_(source), destination_(destination) {
+ }
+
+ LOperand* source() const { return source_; }
+ void set_source(LOperand* operand) { source_ = operand; }
+
+ LOperand* destination() const { return destination_; }
+ void set_destination(LOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const {
+ return destination_ == NULL && source_ != NULL;
+ }
+
+  // True if this move's source is the given operand, i.e. this move has
+  // to be performed before we can write into that operand.
+ bool Blocks(LOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
+
+ // A move is redundant if it's been eliminated, if its source and
+ // destination are the same, or if its destination is unneeded.
+ bool IsRedundant() const {
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored();
+ }
+
+ bool IsIgnored() const {
+ return destination_ != NULL &&
+ destination_->IsUnallocated() &&
+ LUnallocated::cast(destination_)->HasIgnorePolicy();
+ }
+
+  // We clear both operands to indicate a move that has been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ ASSERT(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
+
+ private:
+ LOperand* source_;
+ LOperand* destination_;
+};
+
+
+class LConstantOperand: public LOperand {
+ public:
+ static LConstantOperand* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LConstantOperand(index);
+ }
+
+ static LConstantOperand* cast(LOperand* op) {
+ ASSERT(op->IsConstantOperand());
+ return reinterpret_cast<LConstantOperand*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LConstantOperand cache[];
+
+ LConstantOperand() : LOperand() { }
+ explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+};
+
+
+class LArgument: public LOperand {
+ public:
+ explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
+
+ static LArgument* cast(LOperand* op) {
+ ASSERT(op->IsArgument());
+ return reinterpret_cast<LArgument*>(op);
+ }
+};
+
+
+class LStackSlot: public LOperand {
+ public:
+ static LStackSlot* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LStackSlot(index);
+ }
+
+ static LStackSlot* cast(LOperand* op) {
+ ASSERT(op->IsStackSlot());
+ return reinterpret_cast<LStackSlot*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LStackSlot cache[];
+
+ LStackSlot() : LOperand() { }
+ explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
+};
+
+
+class LDoubleStackSlot: public LOperand {
+ public:
+ static LDoubleStackSlot* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LDoubleStackSlot(index);
+ }
+
+ static LDoubleStackSlot* cast(LOperand* op) {
+    ASSERT(op->IsDoubleStackSlot());
+ return reinterpret_cast<LDoubleStackSlot*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LDoubleStackSlot cache[];
+
+ LDoubleStackSlot() : LOperand() { }
+ explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
+};
+
+
+class LRegister: public LOperand {
+ public:
+ static LRegister* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LRegister(index);
+ }
+
+ static LRegister* cast(LOperand* op) {
+ ASSERT(op->IsRegister());
+ return reinterpret_cast<LRegister*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 16;
+ static LRegister cache[];
+
+ LRegister() : LOperand() { }
+ explicit LRegister(int index) : LOperand(REGISTER, index) { }
+};
+
+
+class LDoubleRegister: public LOperand {
+ public:
+ static LDoubleRegister* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LDoubleRegister(index);
+ }
+
+ static LDoubleRegister* cast(LOperand* op) {
+ ASSERT(op->IsDoubleRegister());
+ return reinterpret_cast<LDoubleRegister*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 16;
+ static LDoubleRegister cache[];
+
+ LDoubleRegister() : LOperand() { }
+ explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
+};
+
+
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }
@@ -161,6 +509,82 @@
friend class LCodegen;
};
+
+// Iterates over the non-null, non-constant operands in an environment.
+class ShallowIterator BASE_EMBEDDED {
+ public:
+ explicit ShallowIterator(LEnvironment* env)
+ : env_(env),
+ limit_(env != NULL ? env->values()->length() : 0),
+ current_(0) {
+ current_ = AdvanceToNext(0);
+ }
+
+ inline bool HasNext() {
+ return env_ != NULL && current_ < limit_;
+ }
+
+ inline LOperand* Next() {
+ ASSERT(HasNext());
+ return env_->values()->at(current_);
+ }
+
+ inline void Advance() {
+ current_ = AdvanceToNext(current_ + 1);
+ }
+
+ inline LEnvironment* env() { return env_; }
+
+ private:
+ inline int AdvanceToNext(int start) {
+ while (start < limit_ &&
+ (env_->values()->at(start) == NULL ||
+ env_->values()->at(start)->IsConstantOperand())) {
+ start++;
+ }
+ return start;
+ }
+
+ LEnvironment* env_;
+ int limit_;
+ int current_;
+};
+
+
+// Iterator for non-null, non-constant operands, including outer environments.
+class DeepIterator BASE_EMBEDDED {
+ public:
+ explicit DeepIterator(LEnvironment* env)
+ : current_iterator_(env) { }
+
+ inline bool HasNext() {
+ if (current_iterator_.HasNext()) return true;
+ if (current_iterator_.env() == NULL) return false;
+ AdvanceToOuter();
+ return current_iterator_.HasNext();
+ }
+
+ inline LOperand* Next() {
+ ASSERT(current_iterator_.HasNext());
+ return current_iterator_.Next();
+ }
+
+ inline void Advance() {
+ if (current_iterator_.HasNext()) {
+ current_iterator_.Advance();
+ } else {
+ AdvanceToOuter();
+ }
+ }
+
+ private:
+ inline void AdvanceToOuter() {
+ current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ }
+
+ ShallowIterator current_iterator_;
+};
+
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
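
The ShallowIterator/DeepIterator pair gives the allocator a uniform way to reach every operand captured by a deoptimization environment, including the outer environments of inlined frames. A hypothetical walk using only the API defined above (VisitEnvironmentOperands and the visit step are illustrative, not code from this patch):

void VisitEnvironmentOperands(LEnvironment* env) {
  for (DeepIterator it(env); it.HasNext(); it.Advance()) {
    LOperand* op = it.Next();  // non-null and non-constant by construction
    // ... record a use of |op| ...
  }
}
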
diff --git a/src/liveedit.cc b/src/liveedit.cc
index b6ad4cf..a395c51 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -36,7 +36,6 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "memory.h"
-#include "oprofile-agent.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h
new file mode 100644
index 0000000..997da4e
--- /dev/null
+++ b/src/liveobjectlist-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEOBJECTLIST_INL_H_
+#define V8_LIVEOBJECTLIST_INL_H_
+
+#include "v8.h"
+
+#include "liveobjectlist.h"
+
+#endif // V8_LIVEOBJECTLIST_INL_H_
+
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
new file mode 100644
index 0000000..28a3d6d
--- /dev/null
+++ b/src/liveobjectlist.cc
@@ -0,0 +1,53 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef LIVE_OBJECT_LIST
+
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "checks.h"
+#include "global-handles.h"
+#include "heap.h"
+#include "inspector.h"
+#include "list-inl.h"
+#include "liveobjectlist.h"
+#include "string-stream.h"
+#include "top.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+
+} } // namespace v8::internal
+
+#endif // LIVE_OBJECT_LIST
+
diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h
new file mode 100644
index 0000000..11f5c45
--- /dev/null
+++ b/src/liveobjectlist.h
@@ -0,0 +1,112 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEOBJECTLIST_H_
+#define V8_LIVEOBJECTLIST_H_
+
+#include "v8.h"
+
+#include "checks.h"
+#include "heap.h"
+#include "objects.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef LIVE_OBJECT_LIST
+
+
+// Temporary stubbed out LiveObjectList implementation.
+class LiveObjectList {
+ public:
+ inline static void GCEpilogue() {}
+ inline static void GCPrologue() {}
+ inline static void IterateElements(ObjectVisitor* v) {}
+ inline static void ProcessNonLive(HeapObject *obj) {}
+ inline static void UpdateReferencesForScavengeGC() {}
+
+ static MaybeObject* Capture() { return Heap::undefined_value(); }
+ static bool Delete(int id) { return false; }
+ static MaybeObject* Dump(int id1,
+ int id2,
+ int start_idx,
+ int dump_limit,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* Info(int start_idx, int dump_limit) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* Summarize(int id1,
+ int id2,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+
+ static void Reset() {}
+ static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
+ static Object* GetObjId(Handle<String> address) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* GetObjRetainers(int obj_id,
+ Handle<JSObject> instance_filter,
+ bool verbose,
+ int start,
+ int count,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+
+ static Object* GetPath(int obj_id1,
+ int obj_id2,
+ Handle<JSObject> instance_filter) {
+ return Heap::undefined_value();
+ }
+ static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
+};
+
+
+#else // !LIVE_OBJECT_LIST
+
+
+class LiveObjectList {
+ public:
+ static void GCEpilogue() {}
+ static void GCPrologue() {}
+ static void IterateElements(ObjectVisitor* v) {}
+ static void ProcessNonLive(HeapObject *obj) {}
+ static void UpdateReferencesForScavengeGC() {}
+};
+
+
+#endif // LIVE_OBJECT_LIST
+
+} } // namespace v8::internal
+
+#endif // V8_LIVEOBJECTLIST_H_
+
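
liveobjectlist.h relies on the usual conditional-stub pattern: with LIVE_OBJECT_LIST undefined, the class collapses to empty inline statics, so the unconditional hook calls added to mark-compact.cc below cost nothing in normal builds. A generic sketch of the pattern (Tracker and FEATURE_TRACKING are hypothetical names, not from this patch):

#ifdef FEATURE_TRACKING
class Tracker {
 public:
  static void ProcessNonLive(void* obj);  // real implementation elsewhere
};
#else
class Tracker {
 public:
  // Empty inline body; the compiler removes the call entirely.
  static void ProcessNonLive(void*) {}
};
#endif
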
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a946ffa..5c649d1 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -33,6 +33,7 @@
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
+#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"
@@ -1660,6 +1661,7 @@
free_start = current;
is_prev_alive = false;
}
+ LiveObjectList::ProcessNonLive(object);
}
}
@@ -1880,6 +1882,9 @@
size,
false);
} else {
+ // Process the dead object before we write a NULL into its header.
+ LiveObjectList::ProcessNonLive(object);
+
size = object->Size();
Memory::Address_at(current) = NULL;
}
@@ -1899,6 +1904,7 @@
// Update roots.
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
@@ -1986,6 +1992,7 @@
free_start = current;
is_previous_alive = false;
}
+ LiveObjectList::ProcessNonLive(object);
}
// The object is now unmarked for the call to Size() at the top of the
// loop.
@@ -2164,6 +2171,7 @@
void UpdateMapPointersInRoots() {
Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+ LiveObjectList::IterateElements(&map_updating_visitor_);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
@@ -2533,6 +2541,8 @@
// Update the pointer to the head of the weak list of global contexts.
updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+ LiveObjectList::IterateElements(&updating_visitor);
+
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
diff --git a/src/math.js b/src/math.js
index 02b19ab..70b8c57 100644
--- a/src/math.js
+++ b/src/math.js
@@ -220,7 +220,7 @@
DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetProperty($Math,
"LOG10E",
- 0.43429448190325176,
+ 0.4342944819032518,
DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetProperty($Math,
"PI",
diff --git a/src/messages.cc b/src/messages.cc
index 42fc3c9..990000a 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -62,67 +62,48 @@
}
-Handle<Object> MessageHandler::MakeMessageObject(
+Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- // Build error message object
- v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom.
- Handle<Object> type_str = Factory::LookupAsciiSymbol(type);
- Handle<Object> array = Factory::NewJSArray(args.length());
- for (int i = 0; i < args.length(); i++)
- SetElement(Handle<JSArray>::cast(array), i, args[i]);
+ Handle<String> type_handle = Factory::LookupAsciiSymbol(type);
+ Handle<FixedArray> arguments_elements =
+ Factory::NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ arguments_elements->set(i, *args[i]);
+ }
+ Handle<JSArray> arguments_handle =
+ Factory::NewJSArrayWithElements(arguments_elements);
- Handle<JSFunction> fun(Top::global_context()->make_message_fun());
- int start, end;
- Handle<Object> script;
+ int start = 0;
+ int end = 0;
+ Handle<Object> script_handle = Factory::undefined_value();
if (loc) {
start = loc->start_pos();
end = loc->end_pos();
- script = GetScriptWrapper(loc->script());
- } else {
- start = end = 0;
- script = Factory::undefined_value();
+ script_handle = GetScriptWrapper(loc->script());
}
- Handle<Object> start_handle(Smi::FromInt(start));
- Handle<Object> end_handle(Smi::FromInt(end));
- Handle<Object> stack_trace_val = stack_trace.is_null()
- ? Factory::undefined_value()
- : Handle<Object>::cast(stack_trace);
- Handle<Object> stack_frames_val = stack_frames.is_null()
- ? Factory::undefined_value()
- : Handle<Object>::cast(stack_frames);
- const int argc = 7;
- Object** argv[argc] = { type_str.location(),
- array.location(),
- start_handle.location(),
- end_handle.location(),
- script.location(),
- stack_trace_val.location(),
- stack_frames_val.location() };
- // Setup a catch handler to catch exceptions in creating the message. This
- // handler is non-verbose to avoid calling MakeMessage recursively in case of
- // an exception.
- v8::TryCatch catcher;
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
+ Handle<Object> stack_trace_handle = stack_trace.is_null()
+ ? Factory::undefined_value()
+ : Handle<Object>::cast(stack_trace);
- // Format the message.
- bool caught_exception = false;
- Handle<Object> message =
- Execution::Call(fun, Factory::undefined_value(), argc, argv,
- &caught_exception);
+ Handle<Object> stack_frames_handle = stack_frames.is_null()
+ ? Factory::undefined_value()
+ : Handle<Object>::cast(stack_frames);
- // If creating the message (in JS code) resulted in an exception, we
- // skip doing the callback. This usually only happens in case of
- // stack overflow exceptions being thrown by the parser when the
- // stack is almost full.
- if (caught_exception) return Handle<Object>();
+ Handle<JSMessageObject> message =
+ Factory::NewJSMessageObject(type_handle,
+ arguments_handle,
+ start,
+ end,
+ script_handle,
+ stack_trace_handle,
+ stack_frames_handle);
- return message.EscapeFrom(&scope);
+ return message;
}
diff --git a/src/messages.h b/src/messages.h
index 440bde8..48f3244 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -93,11 +93,12 @@
static void ReportMessage(const char* msg);
// Returns a message object for the API to use.
- static Handle<Object> MakeMessageObject(const char* type,
- MessageLocation* loc,
- Vector< Handle<Object> > args,
- Handle<String> stack_trace,
- Handle<JSArray> stack_frames);
+ static Handle<JSMessageObject> MakeMessageObject(
+ const char* type,
+ MessageLocation* loc,
+ Vector< Handle<Object> > args,
+ Handle<String> stack_trace,
+ Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(MessageLocation* loc, Handle<Object> message);
diff --git a/src/messages.js b/src/messages.js
index a30ef8a..1e41b17 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -38,10 +38,6 @@
var COMPILATION_TYPE_EVAL = 1;
var COMPILATION_TYPE_JSON = 2;
-// Lazily initialized.
-var kVowelSounds = 0;
-var kCapitalVowelSounds = 0;
-
// Matches Messages::kNoLineNumberInfo from v8.h
var kNoLineNumberInfo = 0;
@@ -50,61 +46,70 @@
// message on access.
var kAddMessageAccessorsMarker = { };
+var kMessages = 0;
-function GetInstanceName(cons) {
- if (cons.length == 0) {
- return "";
- }
- var first = %StringToLowerCase(StringCharAt.call(cons, 0));
- if (kVowelSounds === 0) {
- kVowelSounds = {a: true, e: true, i: true, o: true, u: true, y: true};
- kCapitalVowelSounds = {a: true, e: true, i: true, o: true, u: true, h: true,
- f: true, l: true, m: true, n: true, r: true, s: true, x: true, y: true};
- }
- var vowel_mapping = kVowelSounds;
- if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
- // First char is upper case
- var second = %StringToLowerCase(StringCharAt.call(cons, 1));
- // Second char is upper case
- if (StringCharAt.call(cons, 1) != second) {
- vowel_mapping = kCapitalVowelSounds;
+var kReplacementMarkers = [ "%0", "%1", "%2", "%3" ];
+
+function FormatString(format, message) {
+ var args = %MessageGetArguments(message);
+ var result = "";
+ var arg_num = 0;
+ for (var i = 0; i < format.length; i++) {
+ var str = format[i];
+ for (arg_num = 0; arg_num < kReplacementMarkers.length; arg_num++) {
+ if (format[i] !== kReplacementMarkers[arg_num]) continue;
+ try {
+ str = ToDetailString(args[arg_num]);
+ } catch (e) {
+ str = "#<error>";
+ }
}
+ result += str;
}
- var s = vowel_mapping[first] ? "an " : "a ";
- return s + cons;
+ return result;
}
-var kMessages = 0;
+// To check if something is a native error we need to check the
+// concrete native error types. It is not enough to check "obj
+// instanceof $Error" because user code can replace
+// NativeError.prototype.__proto__. User code cannot replace
+// NativeError.prototype though and therefore this is a safe test.
+function IsNativeErrorObject(obj) {
+ return (obj instanceof $Error) ||
+ (obj instanceof $EvalError) ||
+ (obj instanceof $RangeError) ||
+ (obj instanceof $ReferenceError) ||
+ (obj instanceof $SyntaxError) ||
+ (obj instanceof $TypeError) ||
+ (obj instanceof $URIError);
+}
-function FormatString(format, args) {
- var result = format;
- for (var i = 0; i < args.length; i++) {
- var str;
- try { str = ToDetailString(args[i]); }
- catch (e) { str = "#<error>"; }
- result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
+// When formatting internally created error messages, do not
+// invoke overridden error toString methods but explicitly use
+// the default error toString method. This is to avoid leaking error
+// objects between script tags in a browser setting.
+function ToStringCheckErrorObject(obj) {
+ if (IsNativeErrorObject(obj)) {
+ return %_CallFunction(obj, errorToString);
+ } else {
+ return ToString(obj);
}
- return result;
}
function ToDetailString(obj) {
if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
var constructor = obj.constructor;
- if (!constructor) return ToString(obj);
+ if (!constructor) return ToStringCheckErrorObject(obj);
var constructorName = constructor.name;
- if (!constructorName) return ToString(obj);
- return "#<" + GetInstanceName(constructorName) + ">";
- } else if (obj instanceof $Error) {
- // When formatting internally created error messages, do not
- // invoke overwritten error toString methods but explicitly use
- // the error to string method. This is to avoid leaking error
- // objects between script tags in a browser setting.
- return %_CallFunction(obj, errorToString);
+ if (!constructorName || !IS_STRING(constructorName)) {
+ return ToStringCheckErrorObject(obj);
+ }
+ return "#<" + constructorName + ">";
} else {
- return ToString(obj);
+ return ToStringCheckErrorObject(obj);
}
}
@@ -136,84 +141,103 @@
if (kMessages === 0) {
kMessages = {
// Error
- cyclic_proto: "Cyclic __proto__ value",
+ cyclic_proto: ["Cyclic __proto__ value"],
// TypeError
- unexpected_token: "Unexpected token %0",
- unexpected_token_number: "Unexpected number",
- unexpected_token_string: "Unexpected string",
- unexpected_token_identifier: "Unexpected identifier",
- unexpected_eos: "Unexpected end of input",
- malformed_regexp: "Invalid regular expression: /%0/: %1",
- unterminated_regexp: "Invalid regular expression: missing /",
- regexp_flags: "Cannot supply flags when constructing one RegExp from another",
- incompatible_method_receiver: "Method %0 called on incompatible receiver %1",
- invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
- invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
- invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",
- invalid_lhs_in_prefix_op: "Invalid left-hand side expression in prefix operation",
- multiple_defaults_in_switch: "More than one default clause in switch statement",
- newline_after_throw: "Illegal newline after throw",
- redeclaration: "%0 '%1' has already been declared",
- no_catch_or_finally: "Missing catch or finally after try",
- unknown_label: "Undefined label '%0'",
- uncaught_exception: "Uncaught %0",
- stack_trace: "Stack Trace:\n%0",
- called_non_callable: "%0 is not a function",
- undefined_method: "Object %1 has no method '%0'",
- property_not_function: "Property '%0' of object %1 is not a function",
- cannot_convert_to_primitive: "Cannot convert object to primitive value",
- not_constructor: "%0 is not a constructor",
- not_defined: "%0 is not defined",
- non_object_property_load: "Cannot read property '%0' of %1",
- non_object_property_store: "Cannot set property '%0' of %1",
- non_object_property_call: "Cannot call method '%0' of %1",
- with_expression: "%0 has no properties",
- illegal_invocation: "Illegal invocation",
- no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter",
- apply_non_function: "Function.prototype.apply was called on %0, which is a %1 and not a function",
- apply_wrong_args: "Function.prototype.apply: Arguments list has wrong type",
- invalid_in_operator_use: "Cannot use 'in' operator to search for '%0' in %1",
- instanceof_function_expected: "Expecting a function in instanceof check, but got %0",
- instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
- null_to_object: "Cannot convert null to object",
- reduce_no_initial: "Reduce of empty array with no initial value",
- getter_must_be_callable: "Getter must be a function: %0",
- setter_must_be_callable: "Setter must be a function: %0",
- value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
- proto_object_or_null: "Object prototype may only be an Object or null",
- property_desc_object: "Property description must be an object: %0",
- redefine_disallowed: "Cannot redefine property: %0",
- define_disallowed: "Cannot define property, object is not extensible: %0",
+ unexpected_token: ["Unexpected token ", "%0"],
+ unexpected_token_number: ["Unexpected number"],
+ unexpected_token_string: ["Unexpected string"],
+ unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
+ unexpected_eos: ["Unexpected end of input"],
+ malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
+ unterminated_regexp: ["Invalid regular expression: missing /"],
+ regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
+ incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
+ multiple_defaults_in_switch: ["More than one default clause in switch statement"],
+ newline_after_throw: ["Illegal newline after throw"],
+ redeclaration: ["%0", " '", "%1", "' has already been declared"],
+ no_catch_or_finally: ["Missing catch or finally after try"],
+ unknown_label: ["Undefined label '", "%0", "'"],
+ uncaught_exception: ["Uncaught ", "%0"],
+ stack_trace: ["Stack Trace:\n", "%0"],
+ called_non_callable: ["%0", " is not a function"],
+ undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
+ property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
+ cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
+ not_constructor: ["%0", " is not a constructor"],
+ not_defined: ["%0", " is not defined"],
+ non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
+ non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
+ non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
+ with_expression: ["%0", " has no properties"],
+ illegal_invocation: ["Illegal invocation"],
+ no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
+ apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
+ apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
+ invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
+ instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
+ instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
+ null_to_object: ["Cannot convert null to object"],
+ reduce_no_initial: ["Reduce of empty array with no initial value"],
+ getter_must_be_callable: ["Getter must be a function: ", "%0"],
+ setter_must_be_callable: ["Setter must be a function: ", "%0"],
+ value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value: ", "%0"],
+ proto_object_or_null: ["Object prototype may only be an Object or null"],
+ property_desc_object: ["Property description must be an object: ", "%0"],
+ redefine_disallowed: ["Cannot redefine property: ", "%0"],
+ define_disallowed: ["Cannot define property, object is not extensible: ", "%0"],
// RangeError
- invalid_array_length: "Invalid array length",
- stack_overflow: "Maximum call stack size exceeded",
+ invalid_array_length: ["Invalid array length"],
+ stack_overflow: ["Maximum call stack size exceeded"],
// SyntaxError
- unable_to_parse: "Parse error",
- duplicate_regexp_flag: "Duplicate RegExp flag %0",
- invalid_regexp: "Invalid RegExp pattern /%0/",
- illegal_break: "Illegal break statement",
- illegal_continue: "Illegal continue statement",
- illegal_return: "Illegal return statement",
- error_loading_debugger: "Error loading debugger",
- no_input_to_regexp: "No input to %0",
- invalid_json: "String '%0' is not valid JSON",
- circular_structure: "Converting circular structure to JSON",
- obj_ctor_property_non_object: "Object.%0 called on non-object",
- array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
- object_not_extensible: "Can't add property %0, object is not extensible",
- illegal_access: "Illegal access",
- invalid_preparser_data: "Invalid preparser data for function %0"
+ unable_to_parse: ["Parse error"],
+ duplicate_regexp_flag: ["Duplicate RegExp flag ", "%0"],
+ invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
+ illegal_break: ["Illegal break statement"],
+ illegal_continue: ["Illegal continue statement"],
+ illegal_return: ["Illegal return statement"],
+ error_loading_debugger: ["Error loading debugger"],
+ no_input_to_regexp: ["No input to ", "%0"],
+ invalid_json: ["String '", "%0", "' is not valid JSON"],
+ circular_structure: ["Converting circular structure to JSON"],
+ obj_ctor_property_non_object: ["Object.", "%0", " called on non-object"],
+ array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
+ object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
+ illegal_access: ["Illegal access"],
+ invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
+ strict_mode_with: ["Strict mode code may not include a with statement"],
+ strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
+ too_many_parameters: ["Too many parameters in function definition"],
+ strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
+ strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
+ strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
+ strict_function_name: ["Function name may not be eval or arguments in strict mode"],
+ strict_octal_literal: ["Octal literals are not allowed in strict mode."],
+ strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
+ accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
+ accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
+ strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
+ strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_reserved_word: ["Use of future reserved word in strict mode"],
};
}
- var format = kMessages[message.type];
- if (!format) return "<unknown message " + message.type + ">";
- return FormatString(format, message.args);
+ var message_type = %MessageGetType(message);
+ var format = kMessages[message_type];
+ if (!format) return "<unknown message " + message_type + ">";
+ return FormatString(format, message);
}
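+// Illustrative note (the new FormatString is defined outside this hunk):
+// each template in kMessages is now an array of literal parts and "%N"
+// placeholder strings instead of a single format string, e.g.
+//   kMessages.unexpected_token;  // ["Unexpected token ", "%0"]
+// so a message with arguments ["}"] is expected to format to
+// "Unexpected token }".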
function GetLineNumber(message) {
- if (message.startPos == -1) return kNoLineNumberInfo;
- var location = message.script.locationFromPosition(message.startPos, true);
+ var start_position = %MessageGetStartPosition(message);
+ if (start_position == -1) return kNoLineNumberInfo;
+ var script = %MessageGetScript(message);
+ var location = script.locationFromPosition(start_position, true);
if (location == null) return kNoLineNumberInfo;
return location.line + 1;
}
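+// Note: messages are now JSMessageObjects (see src/objects.h below), and
+// their fields are reachable only through runtime calls such as
+// %MessageGetType, %MessageGetScript and %MessageGetStartPosition rather
+// than as ordinary JS properties.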
@@ -222,7 +246,9 @@
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
function GetSourceLine(message) {
- var location = message.script.locationFromPosition(message.startPos, true);
+ var script = %MessageGetScript(message);
+ var start_position = %MessageGetStartPosition(message);
+ var location = script.locationFromPosition(start_position, true);
if (location == null) return "";
location.restrict();
return location.sourceText();
@@ -291,6 +317,7 @@
return i;
}
}
+
return -1;
}
@@ -311,7 +338,7 @@
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
+ if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') end--;
var column = position - start;
// Adjust according to the offset within the resource.
@@ -426,7 +453,7 @@
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- return StringSubstring.call(this.source, start, end);
+ return %_CallFunction(this.source, start, end, StringSubstring);
}
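+// Note: %_CallFunction(receiver, arg1, ..., argN, fn) replaces
+// fn.call(receiver, arg1, ..., argN) throughout this patch; the intrinsic
+// does not depend on Function.prototype.call, which user script could
+// have replaced.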
@@ -554,7 +581,7 @@
* Source text for this location.
*/
SourceLocation.prototype.sourceText = function () {
- return StringSubstring.call(this.script.source, this.start, this.end);
+ return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
};
@@ -591,36 +618,22 @@
* the line terminating characters (if any)
*/
SourceSlice.prototype.sourceText = function () {
- return StringSubstring.call(this.script.source, this.from_position, this.to_position);
+ return %_CallFunction(this.script.source,
+ this.from_position,
+ this.to_position,
+ StringSubstring);
};
// Returns the offset of the given position within the containing
// line.
function GetPositionInLine(message) {
- var location = message.script.locationFromPosition(message.startPos, false);
+ var script = %MessageGetScript(message);
+ var start_position = %MessageGetStartPosition(message);
+ var location = script.locationFromPosition(start_position, false);
if (location == null) return -1;
location.restrict();
- return message.startPos - location.start;
-}
-
-
-function ErrorMessage(type, args, startPos, endPos, script, stackTrace,
- stackFrames) {
- this.startPos = startPos;
- this.endPos = endPos;
- this.type = type;
- this.args = args;
- this.script = script;
- this.stackTrace = stackTrace;
- this.stackFrames = stackFrames;
-}
-
-
-function MakeMessage(type, args, startPos, endPos, script, stackTrace,
- stackFrames) {
- return new ErrorMessage(type, args, startPos, endPos, script, stackTrace,
- stackFrames);
+ return start_position - location.start;
}
@@ -666,10 +679,10 @@
CallSite.prototype.getTypeName = function () {
var constructor = this.receiver.constructor;
if (!constructor)
- return $Object.prototype.toString.call(this.receiver);
+ return %_CallFunction(this.receiver, ObjectToString);
var constructorName = constructor.name;
if (!constructorName)
- return $Object.prototype.toString.call(this.receiver);
+ return %_CallFunction(this.receiver, ObjectToString);
return constructorName;
};
@@ -718,8 +731,8 @@
// this function.
var ownName = this.fun.name;
if (ownName && this.receiver &&
- (ObjectLookupGetter.call(this.receiver, ownName) === this.fun ||
- ObjectLookupSetter.call(this.receiver, ownName) === this.fun ||
+ (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
+ %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
this.receiver[ownName] === this.fun)) {
// To handle DontEnum properties we guess that the method has
// the same name as the function.
@@ -967,7 +980,7 @@
// DefineOneShotAccessor always inserts a message property and
// ignores setters.
DefineOneShotAccessor(this, 'message', function (obj) {
- return FormatMessage({type: obj.type, args: obj.arguments});
+ return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
});
} else if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
@@ -981,11 +994,12 @@
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
- if (!stackTraceLimit) return;
+ if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
if (stackTraceLimit < 0 || stackTraceLimit > 10000)
stackTraceLimit = 10000;
- var raw_stack = %CollectStackTrace(cons_opt ? cons_opt : captureStackTrace,
- stackTraceLimit);
+ var raw_stack = %CollectStackTrace(cons_opt
+ ? cons_opt
+ : captureStackTrace, stackTraceLimit);
DefineOneShotAccessor(obj, 'stack', function (obj) {
return FormatRawStackTrace(obj, raw_stack);
});
@@ -1006,19 +1020,46 @@
// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
-function errorToString() {
- var type = this.type;
- if (type && !this.hasOwnProperty("message")) {
- return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
+// Global list of error objects visited during errorToString. This is
+// used to detect cycles in error toString formatting.
+var visited_errors = new $Array();
+var cyclic_error_marker = new $Object();
+
+function errorToStringDetectCycle() {
+ if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker;
+ try {
+ var type = this.type;
+ if (type && !%_CallFunction(this, "message", ObjectHasOwnProperty)) {
+ var formatted = FormatMessage(%NewMessageObject(type, this.arguments));
+ return this.name + ": " + formatted;
+ }
+ var message = %_CallFunction(this, "message", ObjectHasOwnProperty)
+ ? (": " + this.message)
+ : "";
+ return this.name + message;
+ } finally {
+ visited_errors.length = visited_errors.length - 1;
}
- var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
- return this.name + message;
+}
+
+function errorToString() {
+ // This helper function is needed because access to properties on
+ // the builtins object does not work inside of a catch clause.
+ function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
+
+ try {
+ return %_CallFunction(this, errorToStringDetectCycle);
+ } catch(e) {
+ // If this error message was encountered already, return the empty
+ // string for it instead of recursively formatting it.
+ if (isCyclicErrorMarker(e)) return '';
+ else throw e;
+ }
}
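+// Illustrative example (not part of this patch) of the cycle the marker
+// guards against:
+//   var e = new $Error();
+//   e.name = e;    // converting 'name' to a string re-enters errorToString
+//   e.toString();  // the inner visit detects the cycle and yields ''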
%FunctionSetName(errorToString, 'toString');
%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
-
// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 519fe62..e5c2ad8 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -172,23 +172,11 @@
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 91dec17..683b862 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -397,6 +397,20 @@
}
+Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 9177a6b..416f887 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -411,7 +411,7 @@
Mirror.prototype.toText = function() {
- // Simpel to text which is used when on specialization in subclass.
+ // Simple to-text conversion, used when a subclass provides no specialization.
- return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
+ return "#<" + this.constructor.name + ">";
}
@@ -425,7 +425,7 @@
* @extends Mirror
*/
function ValueMirror(type, value, transient) {
- Mirror.call(this, type);
+ %_CallFunction(this, type, Mirror);
this.value_ = value;
if (!transient) {
this.allocateHandle_();
@@ -470,7 +470,7 @@
* @extends ValueMirror
*/
function UndefinedMirror() {
- ValueMirror.call(this, UNDEFINED_TYPE, void 0);
+ %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
}
inherits(UndefinedMirror, ValueMirror);
@@ -486,7 +486,7 @@
* @extends ValueMirror
*/
function NullMirror() {
- ValueMirror.call(this, NULL_TYPE, null);
+ %_CallFunction(this, NULL_TYPE, null, ValueMirror);
}
inherits(NullMirror, ValueMirror);
@@ -503,7 +503,7 @@
* @extends ValueMirror
*/
function BooleanMirror(value) {
- ValueMirror.call(this, BOOLEAN_TYPE, value);
+ %_CallFunction(this, BOOLEAN_TYPE, value, ValueMirror);
}
inherits(BooleanMirror, ValueMirror);
@@ -520,7 +520,7 @@
* @extends ValueMirror
*/
function NumberMirror(value) {
- ValueMirror.call(this, NUMBER_TYPE, value);
+ %_CallFunction(this, NUMBER_TYPE, value, ValueMirror);
}
inherits(NumberMirror, ValueMirror);
@@ -537,7 +537,7 @@
* @extends ValueMirror
*/
function StringMirror(value) {
- ValueMirror.call(this, STRING_TYPE, value);
+ %_CallFunction(this, STRING_TYPE, value, ValueMirror);
}
inherits(StringMirror, ValueMirror);
@@ -568,7 +568,7 @@
* @extends ValueMirror
*/
function ObjectMirror(value, type, transient) {
- ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
+ %_CallFunction(this, type || OBJECT_TYPE, value, transient, ValueMirror);
}
inherits(ObjectMirror, ValueMirror);
@@ -767,7 +767,7 @@
name = this.className();
}
}
- return '#<' + builtins.GetInstanceName(name) + '>';
+ return '#<' + name + '>';
};
@@ -778,7 +778,7 @@
* @extends ObjectMirror
*/
function FunctionMirror(value) {
- ObjectMirror.call(this, value, FUNCTION_TYPE);
+ %_CallFunction(this, value, FUNCTION_TYPE, ObjectMirror);
this.resolved_ = true;
}
inherits(FunctionMirror, ObjectMirror);
@@ -908,7 +908,7 @@
function UnresolvedFunctionMirror(value) {
// Construct this using the ValueMirror as an unresolved function is not a
// real object but just a string.
- ValueMirror.call(this, FUNCTION_TYPE, value);
+ %_CallFunction(this, FUNCTION_TYPE, value, ValueMirror);
this.propertyCount_ = 0;
this.elementCount_ = 0;
this.resolved_ = false;
@@ -958,7 +958,7 @@
* @extends ObjectMirror
*/
function ArrayMirror(value) {
- ObjectMirror.call(this, value);
+ %_CallFunction(this, value, ObjectMirror);
}
inherits(ArrayMirror, ObjectMirror);
@@ -994,7 +994,7 @@
* @extends ObjectMirror
*/
function DateMirror(value) {
- ObjectMirror.call(this, value);
+ %_CallFunction(this, value, ObjectMirror);
}
inherits(DateMirror, ObjectMirror);
@@ -1012,7 +1012,7 @@
* @extends ObjectMirror
*/
function RegExpMirror(value) {
- ObjectMirror.call(this, value, REGEXP_TYPE);
+ %_CallFunction(this, value, REGEXP_TYPE, ObjectMirror);
}
inherits(RegExpMirror, ObjectMirror);
@@ -1066,7 +1066,7 @@
* @extends ObjectMirror
*/
function ErrorMirror(value) {
- ObjectMirror.call(this, value, ERROR_TYPE);
+ %_CallFunction(this, value, ERROR_TYPE, ObjectMirror);
}
inherits(ErrorMirror, ObjectMirror);
@@ -1084,9 +1084,9 @@
// Use the same text representation as in messages.js.
- var text;
+ var str;
try {
- str = builtins.ToDetailString(this.value_);
+ str = %_CallFunction(this.value_, builtins.errorToString);
} catch (e) {
- str = '#<an Error>';
+ str = '#<Error>';
}
return str;
}
@@ -1101,7 +1101,7 @@
* @extends Mirror
*/
function PropertyMirror(mirror, name, details) {
- Mirror.call(this, PROPERTY_TYPE);
+ %_CallFunction(this, PROPERTY_TYPE, Mirror);
this.mirror_ = mirror;
this.name_ = name;
this.value_ = details[0];
@@ -1397,7 +1397,7 @@
* @extends Mirror
*/
function FrameMirror(break_id, index) {
- Mirror.call(this, FRAME_TYPE);
+ %_CallFunction(this, FRAME_TYPE, Mirror);
this.break_id_ = break_id;
this.index_ = index;
this.details_ = new FrameDetails(break_id, index);
@@ -1712,7 +1712,7 @@
* @extends Mirror
*/
function ScopeMirror(frame, index) {
- Mirror.call(this, SCOPE_TYPE);
+ %_CallFunction(this, SCOPE_TYPE, Mirror);
this.frame_index_ = frame.index_;
this.scope_index_ = index;
this.details_ = new ScopeDetails(frame, index);
@@ -1752,7 +1752,7 @@
* @extends Mirror
*/
function ScriptMirror(script) {
- Mirror.call(this, SCRIPT_TYPE);
+ %_CallFunction(this, SCRIPT_TYPE, Mirror);
this.script_ = script;
this.context_ = new ContextMirror(script.context_data);
this.allocateHandle_();
@@ -1868,7 +1868,7 @@
* @extends Mirror
*/
function ContextMirror(data) {
- Mirror.call(this, CONTEXT_TYPE);
+ %_CallFunction(this, CONTEXT_TYPE, Mirror);
this.data_ = data;
this.allocateHandle_();
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index f9c57e6..c1caef2 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -158,6 +158,9 @@
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
break;
+ case JS_MESSAGE_OBJECT_TYPE:
+ JSMessageObject::cast(this)->JSMessageObjectVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -296,6 +299,19 @@
}
+void JSMessageObject::JSMessageObjectVerify() {
+ CHECK(IsJSMessageObject());
+ CHECK(type()->IsString());
+ CHECK(arguments()->IsJSArray());
+ VerifyObjectField(kStartPositionOffset);
+ VerifyObjectField(kEndPositionOffset);
+ VerifyObjectField(kArgumentsOffset);
+ VerifyObjectField(kScriptOffset);
+ VerifyObjectField(kStackTraceOffset);
+ VerifyObjectField(kStackFramesOffset);
+}
+
+
void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
@@ -424,7 +440,7 @@
ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
(is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- ASSERT(uc16_data->IsTheHole() || ascii_data->IsJSObject() ||
+ ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index df44674..24887a0 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -57,8 +57,7 @@
PropertyDetails PropertyDetails::AsDeleted() {
- PropertyDetails d(DONT_ENUM, NORMAL);
- Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
+ Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -409,7 +408,7 @@
bool MaybeObject::IsOutOfMemory() {
return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
+ && Failure::cast(this)->IsOutOfMemoryException();
}
@@ -431,26 +430,26 @@
bool Object::IsJSObject() {
return IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
}
bool Object::IsJSContextExtensionObject() {
return IsHeapObject()
- && (HeapObject::cast(this)->map()->instance_type() ==
- JS_CONTEXT_EXTENSION_OBJECT_TYPE);
+ && (HeapObject::cast(this)->map()->instance_type() ==
+ JS_CONTEXT_EXTENSION_OBJECT_TYPE);
}
bool Object::IsMap() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
}
bool Object::IsFixedArray() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
}
@@ -496,19 +495,19 @@
bool Object::IsCatchContext() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::catch_context_map();
+ && HeapObject::cast(this)->map() == Heap::catch_context_map();
}
bool Object::IsGlobalContext() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::global_context_map();
+ && HeapObject::cast(this)->map() == Heap::global_context_map();
}
bool Object::IsJSFunction() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
}
@@ -519,7 +518,7 @@
bool Object::IsCode() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
}
@@ -545,7 +544,14 @@
bool Object::IsJSValue() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
+}
+
+
+bool Object::IsJSMessageObject() {
+ return Object::IsHeapObject()
+ && (HeapObject::cast(this)->map()->instance_type() ==
+ JS_MESSAGE_OBJECT_TYPE);
}
@@ -556,7 +562,7 @@
bool Object::IsProxy() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
}
@@ -567,13 +573,13 @@
bool Object::IsJSArray() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
}
bool Object::IsJSRegExp() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
+ && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
}
@@ -584,7 +590,7 @@
bool Object::IsHashTable() {
return Object::IsHeapObject()
- && HeapObject::cast(this)->map() == Heap::hash_table_map();
+ && HeapObject::cast(this)->map() == Heap::hash_table_map();
}
@@ -1286,6 +1292,8 @@
return JSValue::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
+ case JS_MESSAGE_OBJECT_TYPE:
+ return JSMessageObject::kSize;
default:
UNREACHABLE();
return 0;
@@ -2502,29 +2510,29 @@
}
-unsigned Code::safepoint_table_start() {
+unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+ return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
}
-void Code::set_safepoint_table_start(unsigned offset) {
+void Code::set_safepoint_table_offset(unsigned offset) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
}
-unsigned Code::stack_check_table_start() {
+unsigned Code::stack_check_table_offset() {
ASSERT(kind() == FUNCTION);
- return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+ return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
}
-void Code::set_stack_check_table_start(unsigned offset) {
+void Code::set_stack_check_table_offset(unsigned offset) {
ASSERT(kind() == FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
}
@@ -2602,10 +2610,12 @@
PropertyType type,
int argc,
InlineCacheHolderFlag holder) {
- // Extra IC state is only allowed for monomorphic call IC stubs.
+ // Extra IC state is only allowed for monomorphic call IC stubs
+ // or for store IC stubs.
ASSERT(extra_ic_state == kNoExtraICState ||
(kind == CALL_IC && (ic_state == MONOMORPHIC ||
- ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)));
+ ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
+ (kind == STORE_IC));
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
@@ -2739,6 +2749,22 @@
}
+MaybeObject* Map::GetPixelArrayElementsMap() {
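+// Sketch of the intended use (assumed call site, not shown in this
+// patch): callers switch an object to pixel array elements via
+//   MaybeObject* maybe_map = object->map()->GetPixelArrayElementsMap();
+// so maps with and without pixel array elements stay distinct.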
+ if (has_pixel_array_elements()) return this;
+ // TODO(danno): Special case empty object map (or most common case)
+ // to return a pre-canned pixel array map.
+ Object* obj;
+ { MaybeObject* maybe_obj = CopyDropTransitions();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ Map* new_map = Map::cast(obj);
+ new_map->set_has_fast_elements(false);
+ new_map->set_has_pixel_array_elements(true);
+ Counters::map_to_pixel_array_elements.Increment();
+ return new_map;
+}
+
+
ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
@@ -2985,6 +3011,18 @@
}
+bool SharedFunctionInfo::strict_mode() {
+ return BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+
+void SharedFunctionInfo::set_strict_mode(bool value) {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kStrictModeFunction,
+ value));
+}
+
+
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -3079,12 +3117,6 @@
}
-bool SharedFunctionInfo::IsBuiltinMathFunction() {
- return HasBuiltinFunctionId() &&
- builtin_function_id() >= kFirstMathFunctionId;
-}
-
-
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
ASSERT(HasBuiltinFunctionId());
return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
@@ -3296,6 +3328,22 @@
}
+ACCESSORS(JSMessageObject, type, String, kTypeOffset)
+ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
+ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
+ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
+ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
+SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
+SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
+
+
+JSMessageObject* JSMessageObject::cast(Object* obj) {
+ ASSERT(obj->IsJSMessageObject());
+ ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize);
+ return reinterpret_cast<JSMessageObject*>(obj);
+}
+
+
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 9879da2..237358d 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -151,6 +151,9 @@
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
break;
+ case JS_MESSAGE_OBJECT_TYPE:
+ JSMessageObject::cast(this)->JSMessageObjectPrint(out);
+ break;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
break;
@@ -395,6 +398,8 @@
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
+ case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
+ case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
@@ -465,6 +470,24 @@
}
+void JSMessageObject::JSMessageObjectPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSMessageObject");
+ PrintF(out, " - type: ");
+ type()->ShortPrint(out);
+ PrintF(out, "\n - arguments: ");
+ arguments()->ShortPrint(out);
+ PrintF(out, "\n - start_position: %d", start_position());
+ PrintF(out, "\n - end_position: %d", end_position());
+ PrintF(out, "\n - script: ");
+ script()->ShortPrint(out);
+ PrintF(out, "\n - stack_trace: ");
+ stack_trace()->ShortPrint(out);
+ PrintF(out, "\n - stack_frames: ");
+ stack_frames()->ShortPrint(out);
+ PrintF(out, "\n");
+}
+
+
void String::StringPrint(FILE* out) {
if (StringShape(this).IsSymbol()) {
PrintF(out, "#");
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index c35e02c..5f054bd 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -104,6 +104,7 @@
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
diff --git a/src/objects.cc b/src/objects.cc
index 36a8e5c..5003b4f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -979,6 +979,9 @@
case SHARED_FUNCTION_INFO_TYPE:
accumulator->Add("<SharedFunctionInfo>");
break;
+ case JS_MESSAGE_OBJECT_TYPE:
+ accumulator->Add("<JSMessageObject>");
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
accumulator->Put('<'); \
@@ -1069,6 +1072,7 @@
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
case JS_FUNCTION_TYPE:
@@ -1209,6 +1213,8 @@
MaybeObject* JSObject::AddFastProperty(String* name,
Object* value,
PropertyAttributes attributes) {
+ ASSERT(!IsJSGlobalProxy());
+
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
StringInputBuffer buffer(name);
@@ -1701,8 +1707,9 @@
}
-bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
- Object* value) {
+MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
+ Object* value,
+ bool* found) {
for (Object* pt = GetPrototype();
pt != Heap::null_value();
pt = pt->GetPrototype()) {
@@ -1712,15 +1719,16 @@
NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
- SetElementWithCallback(element, index, value, JSObject::cast(pt));
- return true;
+ *found = true;
+ return SetElementWithCallback(
+ dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
}
}
}
- return false;
+ *found = false;
+ return Heap::the_hole_value();
}
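+// The caller pattern (see the SetElement changes further down in this
+// file) becomes:
+//   bool found;
+//   MaybeObject* result =
+//       SetElementWithCallbackSetterInPrototypes(index, value, &found);
+//   if (found) return result;
+// so failures from the callback setter propagate instead of being
+// silently dropped.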
@@ -2284,6 +2292,9 @@
// The global object is always normalized.
ASSERT(!IsGlobalObject());
+ // JSGlobalProxy must never be normalized.
+ ASSERT(!IsJSGlobalProxy());
+
// Allocate new content.
int property_count = map()->NumberOfDescribedProperties();
if (expected_additional_properties > 0) {
@@ -2770,6 +2781,13 @@
MaybeObject* JSObject::PreventExtensions() {
+ if (IsJSGlobalProxy()) {
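+ // The global proxy holds no properties of its own; forward the
+ // operation to the real global object behind it (its prototype).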
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return this;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->PreventExtensions();
+ }
+
// If there are fast elements we normalize.
if (HasFastElements()) {
Object* ok;
@@ -5916,7 +5934,7 @@
Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
it.rinfo()->set_target_cell(*cell);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
@@ -5997,7 +6015,7 @@
void Code::SetNoStackCheckTable() {
// Indicate the absence of a stack-check table by a table start after the
// end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+ set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
}
@@ -6220,10 +6238,35 @@
}
+void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
+ const char* name = NULL;
+ switch (kind) {
+ case CALL_IC:
+ if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
+ name = "STRING_INDEX_OUT_OF_BOUNDS";
+ }
+ break;
+ case STORE_IC:
+ if (extra == StoreIC::kStoreICStrict) {
+ name = "STRICT";
+ }
+ break;
+ default:
+ break;
+ }
+ if (name != NULL) {
+ PrintF(out, "extra_ic_state = %s\n", name);
+ } else {
+ PrintF(out, "etra_ic_state = %d\n", extra);
+ }
+}
+
+
void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
+ PrintExtraICState(out, kind(), extra_ic_state());
PrintF(out, "ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", PropertyType2String(type()));
@@ -6274,7 +6317,7 @@
}
PrintF(out, "\n");
} else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_start();
+ unsigned offset = stack_check_table_offset();
- // If there is no stack check table, the "table start" will at or after
+ // If there is no stack check table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
@@ -6675,6 +6718,13 @@
return UNDEFINED_ELEMENT;
}
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return UNDEFINED_ELEMENT;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->HasLocalElement(index);
+ }
+
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
@@ -6946,9 +6996,11 @@
uint32_t elms_length = static_cast<uint32_t>(elms->length());
if (check_prototype &&
- (index >= elms_length || elms->get(index)->IsTheHole()) &&
- SetElementWithCallbackSetterInPrototypes(index, value)) {
- return value;
+ (index >= elms_length || elms->get(index)->IsTheHole())) {
+ bool found;
+ MaybeObject* result =
+ SetElementWithCallbackSetterInPrototypes(index, value, &found);
+ if (found) return result;
}
@@ -7080,9 +7132,11 @@
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
- if (check_prototype &&
- SetElementWithCallbackSetterInPrototypes(index, value)) {
- return value;
+ if (check_prototype) {
+ bool found;
+ MaybeObject* result =
+ SetElementWithCallbackSetterInPrototypes(index, value, &found);
+ if (found) return result;
}
// When we set the is_extensible flag to false we always force
// the element into dictionary mode (and force them to stay there).
@@ -7982,20 +8036,28 @@
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
- StringSharedKey(String* source, SharedFunctionInfo* shared)
- : source_(source), shared_(shared) { }
+ StringSharedKey(String* source,
+ SharedFunctionInfo* shared,
+ StrictModeFlag strict_mode)
+ : source_(source),
+ shared_(shared),
+ strict_mode_(strict_mode) { }
bool IsMatch(Object* other) {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
if (shared != shared_) return false;
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+ Smi::cast(pair->get(2))->value());
+ if (strict_mode != strict_mode_) return false;
String* source = String::cast(pair->get(1));
return source->Equals(source_);
}
static uint32_t StringSharedHashHelper(String* source,
- SharedFunctionInfo* shared) {
+ SharedFunctionInfo* shared,
+ StrictModeFlag strict_mode) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
@@ -8005,36 +8067,41 @@
// collection.
Script* script = Script::cast(shared->script());
hash ^= String::cast(script->source())->Hash();
+ if (strict_mode == kStrictMode) hash ^= 0x8000;
hash += shared->start_position();
}
return hash;
}
uint32_t Hash() {
- return StringSharedHashHelper(source_, shared_);
+ return StringSharedHashHelper(source_, shared_, strict_mode_);
}
uint32_t HashForObject(Object* obj) {
FixedArray* pair = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
String* source = String::cast(pair->get(1));
- return StringSharedHashHelper(source, shared);
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+ Smi::cast(pair->get(2))->value());
+ return StringSharedHashHelper(source, shared, strict_mode);
}
MUST_USE_RESULT MaybeObject* AsObject() {
Object* obj;
- { MaybeObject* maybe_obj = Heap::AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = Heap::AllocateFixedArray(3);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
pair->set(0, shared_);
pair->set(1, source_);
+ pair->set(2, Smi::FromInt(strict_mode_));
return pair;
}
private:
String* source_;
SharedFunctionInfo* shared_;
+ StrictModeFlag strict_mode_;
};
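+// Note: since IsMatch and the hash both incorporate strict_mode_, the same
+// eval source compiled in strict and non-strict contexts occupies two
+// distinct compilation cache entries.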
@@ -8523,10 +8590,20 @@
if (value->IsUndefined()) {
undefs++;
} else {
+ if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bail out.
+ return Smi::FromInt(-1);
+ }
new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked();
pos++;
}
} else {
+ if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bail out.
+ return Smi::FromInt(-1);
+ }
new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked();
}
}
@@ -8535,6 +8612,11 @@
uint32_t result = pos;
PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
while (undefs > 0) {
+ if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bail out.
+ return Smi::FromInt(-1);
+ }
new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details)->
ToObjectUnchecked();
pos++;
@@ -8993,8 +9075,10 @@
}
-Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
- StringSharedKey key(src, context->closure()->shared());
+Object* CompilationCacheTable::LookupEval(String* src,
+ Context* context,
+ StrictModeFlag strict_mode) {
+ StringSharedKey key(src, context->closure()->shared(), strict_mode);
int entry = FindEntry(&key);
if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -9029,8 +9113,10 @@
MaybeObject* CompilationCacheTable::PutEval(String* src,
Context* context,
- Object* value) {
- StringSharedKey key(src, context->closure()->shared());
+ SharedFunctionInfo* value) {
+ StringSharedKey key(src,
+ context->closure()->shared(),
+ value->strict_mode() ? kStrictMode : kNonStrictMode);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
diff --git a/src/objects.h b/src/objects.h
index f9cab45..264cc0b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -54,7 +54,8 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
-// - JSValue
+// - JSValue
+// - JSMessageObject
// - ByteArray
// - PixelArray
// - ExternalArray
@@ -288,6 +289,8 @@
V(FIXED_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
\
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ \
V(JS_VALUE_TYPE) \
V(JS_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
@@ -455,6 +458,7 @@
enum InstanceType {
// String types.
+ // FIRST_STRING_TYPE
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
@@ -471,6 +475,7 @@
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
+ // LAST_STRING_TYPE
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
@@ -516,6 +521,8 @@
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ JS_MESSAGE_OBJECT_TYPE,
+
JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
@@ -523,7 +530,8 @@
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
- JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
+
+ JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
JS_FUNCTION_TYPE,
@@ -532,6 +540,8 @@
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
+ FIRST_STRING_TYPE = FIRST_TYPE,
+ LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
@@ -541,7 +551,10 @@
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE
+ LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
+ // RegExp objects have [[Class]] "function" because they are callable.
+ // All types from this type and above are objects with [[Class]] "function".
+ FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
};
@@ -667,6 +680,7 @@
V(Oddball) \
V(SharedFunctionInfo) \
V(JSValue) \
+ V(JSMessageObject) \
V(StringWrapper) \
V(Proxy) \
V(Boolean) \
@@ -1565,7 +1579,8 @@
void LookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
- bool SetElementWithCallbackSetterInPrototypes(uint32_t index, Object* value);
+ MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
+ uint32_t index, Object* value, bool* found);
void LookupCallback(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -3185,6 +3200,7 @@
static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
static const char* PropertyType2String(PropertyType type);
+ static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
inline void Disassemble(const char* name) {
Disassemble(name, stdout);
}
@@ -3261,13 +3277,13 @@
- // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
- // the instruction stream where the safepoint table starts.
+ // [safepoint_table_offset]: For kind OPTIMIZED_FUNCTION, the offset in
+ // the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_start();
- inline void set_safepoint_table_start(unsigned offset);
+ inline unsigned safepoint_table_offset();
+ inline void set_safepoint_table_offset(unsigned offset);
- // [stack_check_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the stack check table starts.
+ // [stack_check_table_offset]: For kind FUNCTION, the offset in the
+ // instruction stream where the stack check table starts.
- inline unsigned stack_check_table_start();
- inline void set_stack_check_table_start(unsigned offset);
+ inline unsigned stack_check_table_offset();
+ inline void set_stack_check_table_offset(unsigned offset);
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
@@ -3431,8 +3447,8 @@
static const int kAllowOSRAtLoopNestingLevelOffset =
kHasDeoptimizationSupportOffset + 1;
- static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
+ static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
+ static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
// Flags layout.
static const int kFlagsICStateShift = 0;
@@ -3578,6 +3594,19 @@
return ((1 << kHasFastElements) & bit_field2()) != 0;
}
+ // Tells whether an instance has pixel array elements.
+ inline void set_has_pixel_array_elements(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kHasPixelArrayElements));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kHasPixelArrayElements));
+ }
+ }
+
+ inline bool has_pixel_array_elements() {
+ return ((1 << kHasPixelArrayElements) & bit_field2()) != 0;
+ }
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -3636,6 +3665,11 @@
// from the descriptors and the fast elements bit cleared.
MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
+ // Returns this map if the pixel array elements bit is already set, otherwise
+ // returns a copy of the map, with all transitions dropped from the
+ // descriptors and the pixel array elements bit set.
+ MUST_USE_RESULT inline MaybeObject* GetPixelArrayElementsMap();
+
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -3754,6 +3788,7 @@
static const int kStringWrapperSafeForDefaultValueOf = 3;
static const int kAttachedToSharedFunctionInfo = 4;
static const int kIsShared = 5;
+ static const int kHasPixelArrayElements = 6;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -4066,7 +4101,6 @@
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasBuiltinFunctionId();
- inline bool IsBuiltinMathFunction();
inline BuiltinFunctionId builtin_function_id();
// [script info]: Script from which the function originates.
@@ -4163,6 +4197,10 @@
inline bool optimization_disabled();
inline void set_optimization_disabled(bool value);
+ // Indicates whether the function is a strict mode function.
+ inline bool strict_mode();
+ inline void set_strict_mode(bool value);
+
// Indicates whether or not the code in the shared function support
// deoptimization.
inline bool has_deoptimization_support();
@@ -4344,6 +4382,7 @@
static const int kCodeAgeShift = 4;
static const int kCodeAgeMask = 0x7;
static const int kOptimizationDisabled = 7;
+ static const int kStrictModeFunction = 8;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -4689,6 +4728,68 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(JSValue);
};
+
+// Representation of message objects used for error reporting through
+// the API. The messages are formatted in JavaScript so this object is
+// a real JavaScript object. The information used for formatting the
+// error messages is not directly accessible from JavaScript to
+// prevent leaking information to user code called during error
+// formatting.
+class JSMessageObject: public JSObject {
+ public:
+ // [type]: the type of error message.
+ DECL_ACCESSORS(type, String)
+
+ // [arguments]: the arguments for formatting the error message.
+ DECL_ACCESSORS(arguments, JSArray)
+
+ // [script]: the script from which the error message originated.
+ DECL_ACCESSORS(script, Object)
+
+ // [stack_trace]: the stack trace for this error message.
+ DECL_ACCESSORS(stack_trace, Object)
+
+ // [stack_frames]: an array of stack frames for this error object.
+ DECL_ACCESSORS(stack_frames, Object)
+
+ // [start_position]: the start position in the script for the error message.
+ inline int start_position();
+ inline void set_start_position(int value);
+
+ // [end_position]: the end position in the script for the error message.
+ inline int end_position();
+ inline void set_end_position(int value);
+
+ // Casting.
+ static inline JSMessageObject* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef OBJECT_PRINT
+ inline void JSMessageObjectPrint() {
+ JSMessageObjectPrint(stdout);
+ }
+ void JSMessageObjectPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void JSMessageObjectVerify();
+#endif
+
+ // Layout description.
+ static const int kTypeOffset = JSObject::kHeaderSize;
+ static const int kArgumentsOffset = kTypeOffset + kPointerSize;
+ static const int kScriptOffset = kArgumentsOffset + kPointerSize;
+ static const int kStackTraceOffset = kScriptOffset + kPointerSize;
+ static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
+ static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
+ static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
+ static const int kSize = kEndPositionOffset + kPointerSize;
+
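+ // The body descriptor covers only the pointer fields up to and
+ // including stack_frames; start_position and end_position are smis and
+ // need no GC visiting.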
+ typedef FixedBodyDescriptor<HeapObject::kMapOffset,
+ kStackFramesOffset + kPointerSize,
+ kSize> BodyDescriptor;
+};
+
+
// Regular expressions
// The regular expression holds a single reference to a FixedArray in
// the kDataOffset field.
@@ -4827,10 +4928,12 @@
public:
// Find cached value for a string key, otherwise return null.
Object* Lookup(String* src);
- Object* LookupEval(String* src, Context* context);
+ Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
MaybeObject* Put(String* src, Object* value);
- MaybeObject* PutEval(String* src, Context* context, Object* value);
+ MaybeObject* PutEval(String* src,
+ Context* context,
+ SharedFunctionInfo* value);
MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
// Remove given value from cache.
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
deleted file mode 100644
index 6df8f50..0000000
--- a/src/oprofile-agent.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "oprofile-agent.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool OProfileAgent::Initialize() {
-#ifdef ENABLE_OPROFILE_AGENT
- if (FLAG_oprofile) {
- if (handle_ != NULL) return false;
-
- // Disable code moving by GC.
- FLAG_always_compact = false;
- FLAG_never_compact = true;
-
- handle_ = op_open_agent();
- return (handle_ != NULL);
- } else {
- return true;
- }
-#else
- if (FLAG_oprofile) {
- OS::Print("Warning: --oprofile specified but binary compiled without "
- "oprofile support.\n");
- }
- return true;
-#endif
-}
-
-
-void OProfileAgent::TearDown() {
-#ifdef ENABLE_OPROFILE_AGENT
- if (handle_ != NULL) {
- op_close_agent(handle_);
- }
-#endif
-}
-
-
-#ifdef ENABLE_OPROFILE_AGENT
-op_agent_t OProfileAgent::handle_ = NULL;
-
-
-void OProfileAgent::CreateNativeCodeRegion(const char* name,
- const void* ptr, unsigned int size) {
- op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
-}
-
-
-void OProfileAgent::CreateNativeCodeRegion(String* name,
- const void* ptr, unsigned int size) {
- const char* func_name;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- func_name = name->length() > 0 ? *str : "<anonymous>";
- CreateNativeCodeRegion(func_name, ptr, size);
-}
-
-
-void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
- int line_num, const void* ptr, unsigned int size) {
- Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
- const char* func_name;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- func_name = name->length() > 0 ? *str : "<anonymous>";
- SmartPointer<char> source_str =
- source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
- func_name, *source_str, line_num) != -1) {
- CreateNativeCodeRegion(buf.start(), ptr, size);
- } else {
- CreateNativeCodeRegion("<script/func name too long>", ptr, size);
- }
-}
-
-#endif // ENABLE_OPROFILE_AGENT
-
-} } // namespace v8::internal
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
deleted file mode 100644
index 4c50f0f..0000000
--- a/src/oprofile-agent.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OPROFILE_AGENT_H_
-#define V8_OPROFILE_AGENT_H_
-
-#include <stdlib.h>
-
-#include "globals.h"
-
-#ifdef ENABLE_OPROFILE_AGENT
-// opagent.h uses uint64_t type, which can be missing in
-// system headers (they have __uint64_t), but is defined
-// in V8's headers.
-#include <opagent.h> // NOLINT
-
-#define OPROFILE(Call) \
- do { \
- if (v8::internal::OProfileAgent::is_enabled()) \
- v8::internal::OProfileAgent::Call; \
- } while (false)
-#else
-#define OPROFILE(Call) ((void) 0)
-#endif
-
-namespace v8 {
-namespace internal {
-
-class OProfileAgent {
- public:
- static bool Initialize();
- static void TearDown();
-#ifdef ENABLE_OPROFILE_AGENT
- static void CreateNativeCodeRegion(const char* name,
- const void* ptr, unsigned int size);
- static void CreateNativeCodeRegion(String* name,
- const void* ptr, unsigned int size);
- static void CreateNativeCodeRegion(String* name, String* source, int line_num,
- const void* ptr, unsigned int size);
- static bool is_enabled() { return handle_ != NULL; }
-
- private:
- static op_agent_t handle_;
-
- // Size of the buffer that is used for composing code areas names.
- static const int kFormattingBufSize = 256;
-#else
- static bool is_enabled() { return false; }
-#endif
-};
-} }
-
-#endif // V8_OPROFILE_AGENT_H_
diff --git a/src/parser.cc b/src/parser.cc
index 6ad9ab3..04d510f 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -283,6 +283,11 @@
void AddLoop() { loop_count_++; }
bool ContainsLoops() const { return loop_count_ > 0; }
+ bool StrictMode() { return strict_mode_; }
+ void EnableStrictMode() {
+ strict_mode_ = FLAG_strict_mode;
+ }
+
private:
// Captures the number of literals that need materialization in the
// function. Includes regexp literals, and boilerplate for object
@@ -300,6 +305,9 @@
// Captures the number of loops inside the scope.
int loop_count_;
+ // Parsing strict mode code.
+ bool strict_mode_;
+
// Bookkeeping
TemporaryScope** variable_;
TemporaryScope* parent_;
@@ -314,6 +322,8 @@
loop_count_(0),
variable_(variable),
parent_(*variable) {
+ // Inherit the strict mode from the parent scope.
+ strict_mode_ = (parent_ != NULL) && parent_->strict_mode_;
*variable = this;
}
@@ -561,7 +571,6 @@
int prev_level_;
};
-
// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
@@ -607,7 +616,8 @@
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
- bool in_global_context) {
+ bool in_global_context,
+ StrictModeFlag strict_mode) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse);
@@ -623,17 +633,18 @@
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(source, in_global_context, &zone_scope);
+ return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
}
}
FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
bool in_global_context,
+ StrictModeFlag strict_mode,
ZoneScope* zone_scope) {
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
@@ -653,9 +664,16 @@
LexicalScope lexical_scope(&this->top_scope_, &this->with_nesting_level_,
scope);
TemporaryScope temp_scope(&this->temp_scope_);
+ if (strict_mode == kStrictMode) {
+ temp_scope.EnableStrictMode();
+ }
ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
bool ok = true;
+ int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, &ok);
+ if (ok && temp_scope_->StrictMode()) {
+ CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ }
if (ok) {
result = new FunctionLiteral(
no_name,
@@ -669,7 +687,8 @@
0,
source->length(),
false,
- temp_scope.ContainsLoops());
+ temp_scope.ContainsLoops(),
+ temp_scope.StrictMode());
} else if (stack_overflow_) {
Top::StackOverflow();
}
@@ -733,14 +752,18 @@
scope);
TemporaryScope temp_scope(&this->temp_scope_);
+ if (info->strict_mode()) {
+ temp_scope.EnableStrictMode();
+ }
+
FunctionLiteralType type =
info->is_expression() ? EXPRESSION : DECLARATION;
bool ok = true;
- result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
+ result = ParseFunctionLiteral(name,
+ false, // Strict mode name already checked.
+ RelocInfo::kNoPosition, type, &ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
- // The only errors should be stack overflows.
- ASSERT(ok || stack_overflow_);
}
// Make sure the target stack is empty.
@@ -749,8 +772,8 @@
// If there was a stack overflow we have to get rid of AST and it is
// not safe to do before scope has been deleted.
if (result == NULL) {
- Top::StackOverflow();
zone_scope->DeleteOnExit();
+ if (stack_overflow_) Top::StackOverflow();
} else {
Handle<String> inferred_name(info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -1075,9 +1098,46 @@
ASSERT(processor != NULL);
InitializationBlockFinder block_finder;
ThisNamedPropertyAssigmentFinder this_property_assignment_finder;
+ bool directive_prologue = true; // Parsing directive prologue.
+
while (peek() != end_token) {
+ if (directive_prologue && peek() != Token::STRING) {
+ directive_prologue = false;
+ }
+
+ Scanner::Location token_loc = scanner().peek_location();
Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat == NULL || stat->IsEmpty()) continue;
+
+ if (stat == NULL || stat->IsEmpty()) {
+ directive_prologue = false; // End of directive prologue.
+ continue;
+ }
+
+ if (directive_prologue) {
+ // A candidate directive: a string-literal expression statement.
+ ExpressionStatement* e_stat;
+ Literal* literal;
+ // Still inside the directive prologue?
+ if ((e_stat = stat->AsExpressionStatement()) != NULL &&
+ (literal = e_stat->expression()->AsLiteral()) != NULL &&
+ literal->handle()->IsString()) {
+ Handle<String> directive = Handle<String>::cast(literal->handle());
+
+ // Check "use strict" directive (ES5 14.1).
+ if (!temp_scope_->StrictMode() &&
+ directive->Equals(Heap::use_strict()) &&
+ token_loc.end_pos - token_loc.beg_pos ==
+ Heap::use_strict()->length() + 2) {
+ temp_scope_->EnableStrictMode();
+ // "use strict" is the only directive for now.
+ directive_prologue = false;
+ }
+ } else {
+ // End of the directive prologue.
+ directive_prologue = false;
+ }
+ }
+
// We find and mark the initialization blocks on top level code only.
// This is because the optimization prevents reuse of the map transitions,
// so it should be used only for code that will only be run once.
@@ -1388,8 +1448,10 @@
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ bool is_reserved = false;
+ Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
+ is_reserved,
function_token_position,
DECLARATION,
CHECK_OK);
@@ -1431,6 +1493,10 @@
return result;
}
+static bool IsEvalOrArguments(Handle<String> string) {
+ return string.is_identical_to(Factory::eval_symbol()) ||
+ string.is_identical_to(Factory::arguments_symbol());
+}
// If the variable declaration declares exactly one non-const
// variable, then *var is set to that variable. In all other cases,
@@ -1479,6 +1545,13 @@
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
+ // Strict mode variables may not be named eval or arguments.
+ if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ ReportMessage("strict_var_name", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
// assignment for variables and constants because the value must be assigned
@@ -1637,7 +1710,7 @@
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
- bool starts_with_idenfifier = (peek() == Token::IDENTIFIER);
+ bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
if (peek() == Token::COLON && starts_with_idenfifier && expr &&
expr->AsVariableProxy() != NULL &&
@@ -1839,6 +1912,13 @@
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
+
+ if (temp_scope_->StrictMode()) {
+ ReportMessage("strict_mode_with", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
Expect(Token::LPAREN, CHECK_OK);
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -1971,6 +2051,13 @@
Expect(Token::LPAREN, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
+
+ if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ ReportMessage("strict_catch_variable", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
Expect(Token::RPAREN, CHECK_OK);
if (peek() == Token::LBRACE) {
@@ -2216,6 +2303,11 @@
expression = NewThrowReferenceError(type);
}
+ if (temp_scope_->StrictMode()) {
+ // Assignment to eval or arguments is disallowed in strict mode.
+ CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
+ }
+
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2442,6 +2534,12 @@
Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
+
+ if (temp_scope_->StrictMode()) {
+ // Prefix expression operand in strict mode may not be eval or arguments.
+ CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ }
+
int position = scanner().location().beg_pos;
IncrementOperation* increment = new IncrementOperation(op, expression);
return new CountOperation(true /* prefix */, increment, position);
@@ -2467,6 +2565,12 @@
Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
expression = NewThrowReferenceError(type);
}
+
+ if (temp_scope_->StrictMode()) {
+ // Postfix expression operand in strict mode may not be eval or arguments.
+ CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ }
+
Token::Value next = Next();
int position = scanner().location().beg_pos;
IncrementOperation* increment = new IncrementOperation(next, expression);
@@ -2595,9 +2699,12 @@
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
Handle<String> name;
- if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
- result = ParseFunctionLiteral(name, function_token_position,
- NESTED, CHECK_OK);
+ bool is_reserved_name = false;
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+ }
+ result = ParseFunctionLiteral(name, is_reserved_name,
+ function_token_position, NESTED, CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -2666,6 +2773,11 @@
case Token::IDENTIFIER:
return ReportMessage("unexpected_token_identifier",
Vector<const char*>::empty());
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessage(temp_scope_->StrictMode() ?
+ "unexpected_strict_reserved" :
+ "unexpected_token_identifier",
+ Vector<const char*>::empty());
default:
const char* name = Token::String(token);
ASSERT(name != NULL);
@@ -2721,7 +2833,8 @@
result = new Literal(Factory::false_value());
break;
- case Token::IDENTIFIER: {
+ case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
result = top_scope_->NewUnresolved(name, inside_with());
@@ -2940,6 +3053,125 @@
return Factory::undefined_value();
}
+// Defined in ast.cc
+bool IsEqualString(void* first, void* second);
+bool IsEqualNumber(void* first, void* second);
+
+
+// Validation per 11.1.5 Object Initialiser
+class ObjectLiteralPropertyChecker {
+ public:
+ ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+ props(&IsEqualString),
+ elems(&IsEqualNumber),
+ parser_(parser),
+ strict_(strict) {
+ }
+
+ void CheckProperty(
+ ObjectLiteral::Property* property,
+ Scanner::Location loc,
+ bool* ok);
+
+ private:
+ enum PropertyKind {
+ kGetAccessor = 0x01,
+ kSetAccessor = 0x02,
+ kAccessor = kGetAccessor | kSetAccessor,
+ kData = 0x04
+ };
+
+ static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
+ switch (property->kind()) {
+ case ObjectLiteral::Property::GETTER:
+ return kGetAccessor;
+ case ObjectLiteral::Property::SETTER:
+ return kSetAccessor;
+ default:
+ return kData;
+ }
+ }
+
+ HashMap props;
+ HashMap elems;
+ Parser* parser_;
+ bool strict_;
+};
+
+
+void ObjectLiteralPropertyChecker::CheckProperty(
+ ObjectLiteral::Property* property,
+ Scanner::Location loc,
+ bool* ok) {
+
+ ASSERT(property != NULL);
+
+ Literal* lit = property->key();
+ Handle<Object> handle = lit->handle();
+
+ uint32_t hash;
+ HashMap* map;
+ void* key;
+
+ if (handle->IsSymbol()) {
+ Handle<String> name(String::cast(*handle));
+ if (name->AsArrayIndex(&hash)) {
+ Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
+ key = key_handle.location();
+ map = &elems;
+ } else {
+ key = handle.location();
+ hash = name->Hash();
+ map = &props;
+ }
+ } else if (handle->ToArrayIndex(&hash)) {
+ key = handle.location();
+ map = &elems;
+ } else {
+ ASSERT(handle->IsNumber());
+ double num = handle->Number();
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str = DoubleToCString(num, buffer);
+ Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+ key = name.location();
+ hash = name->Hash();
+ map = &props;
+ }
+
+ // Lookup property previously defined, if any.
+ HashMap::Entry* entry = map->Lookup(key, hash, true);
+ intptr_t prev = reinterpret_cast<intptr_t>(entry->value);
+ intptr_t curr = GetPropertyKind(property);
+
+ // Duplicate data properties are illegal in strict mode.
+ if (strict_ && (curr & prev & kData) != 0) {
+ parser_->ReportMessageAt(loc, "strict_duplicate_property",
+ Vector<const char*>::empty());
+ *ok = false;
+ return;
+ }
+ // Data property conflicting with an accessor.
+ if (((curr & kData) && (prev & kAccessor)) ||
+ ((prev & kData) && (curr & kAccessor))) {
+ parser_->ReportMessageAt(loc, "accessor_data_property",
+ Vector<const char*>::empty());
+ *ok = false;
+ return;
+ }
+ // Two accessors of the same type conflict.
+ if ((curr & prev & kAccessor) != 0) {
+ parser_->ReportMessageAt(loc, "accessor_get_set",
+ Vector<const char*>::empty());
+ *ok = false;
+ return;
+ }
+
+ // Update the accumulated property-kind map.
+ entry->value = reinterpret_cast<void*>(prev | curr);
+ *ok = true;
+}
+
void Parser::BuildObjectLiteralConstantProperties(
ZoneList<ObjectLiteral::Property*>* properties,
@@ -3009,6 +3241,7 @@
Token::Value next = Next();
bool is_keyword = Token::IsKeyword(next);
if (next == Token::IDENTIFIER || next == Token::NUMBER ||
+ next == Token::FUTURE_RESERVED_WORD ||
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
@@ -3018,6 +3251,7 @@
}
FunctionLiteral* value =
ParseFunctionLiteral(name,
+ false, // reserved words are allowed here
RelocInfo::kNoPosition,
DECLARATION,
CHECK_OK);
@@ -3045,13 +3279,22 @@
new ZoneList<ObjectLiteral::Property*>(4);
int number_of_boilerplate_properties = 0;
+ ObjectLiteralPropertyChecker checker(this, temp_scope_->StrictMode());
+
Expect(Token::LBRACE, CHECK_OK);
+ Scanner::Location loc = scanner().location();
+
while (peek() != Token::RBRACE) {
if (fni_ != NULL) fni_->Enter();
Literal* key = NULL;
Token::Value next = peek();
+
+ // Location of the property name token
+ Scanner::Location loc = scanner().peek_location();
+
switch (next) {
+ case Token::FUTURE_RESERVED_WORD:
case Token::IDENTIFIER: {
bool is_getter = false;
bool is_setter = false;
@@ -3060,11 +3303,15 @@
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
+ // Update loc to point to the identifier
+ loc = scanner().peek_location();
ObjectLiteral::Property* property =
ParseObjectLiteralGetSet(is_getter, CHECK_OK);
if (IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
}
+ // Validate the property.
+ checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property);
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
@@ -3121,6 +3368,8 @@
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
+ // Validate the property.
+ checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property);
// TODO(1240767): Consider allowing trailing comma.
@@ -3132,6 +3381,7 @@
}
}
Expect(Token::RBRACE, CHECK_OK);
+
// Computation of literal_index must happen before pre parse bailout.
int literal_index = temp_scope_->NextMaterializedLiteralIndex();
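
For readers following the object-literal validation above: CheckProperty reduces the ES5 11.1.5 rules to a per-name bitmask that accumulates as properties are parsed. A minimal standalone sketch of the same lattice, using std::unordered_map in place of V8's HashMap (the names here are illustrative, not V8 API):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    enum PropertyKind {
      kGetAccessor = 0x01,
      kSetAccessor = 0x02,
      kAccessor = kGetAccessor | kSetAccessor,
      kData = 0x04
    };

    // Returns NULL if the property is acceptable, otherwise the message id
    // the parser would report for this combination of old and new kinds.
    const char* CheckProperty(std::unordered_map<std::string, int>* seen,
                              const std::string& name, int curr, bool strict) {
      int prev = (*seen)[name];  // 0 if this name has not been seen before.
      if (strict && (curr & prev & kData) != 0)
        return "strict_duplicate_property";
      if (((curr & kData) && (prev & kAccessor)) ||
          ((prev & kData) && (curr & kAccessor)))
        return "accessor_data_property";
      if ((curr & prev & kAccessor) != 0) return "accessor_get_set";
      (*seen)[name] = prev | curr;
      return NULL;
    }

    int main() {
      std::unordered_map<std::string, int> seen;
      CheckProperty(&seen, "x", kGetAccessor, false);             // get x() {}
      const char* err = CheckProperty(&seen, "x", kData, false);  // x: 1
      std::printf("%s\n", err ? err : "ok");  // accessor_data_property
      return 0;
    }
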
@@ -3193,6 +3443,7 @@
FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
+ bool name_is_reserved,
int function_token_position,
FunctionLiteralType type,
bool* ok) {
@@ -3224,12 +3475,36 @@
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_pos = scanner().location().beg_pos;
+ Scanner::Location name_loc = Scanner::NoLocation();
+ Scanner::Location dupe_loc = Scanner::NoLocation();
+ Scanner::Location reserved_loc = Scanner::NoLocation();
+
bool done = (peek() == Token::RPAREN);
while (!done) {
- Handle<String> param_name = ParseIdentifier(CHECK_OK);
- top_scope_->AddParameter(top_scope_->DeclareLocal(param_name,
- Variable::VAR));
+ bool is_reserved = false;
+ Handle<String> param_name =
+ ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+
+ // Store locations for possible future error reports.
+ if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
+ name_loc = scanner().location();
+ }
+ if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+ dupe_loc = scanner().location();
+ }
+ if (!reserved_loc.IsValid() && is_reserved) {
+ reserved_loc = scanner().location();
+ }
+
+ Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
+ top_scope_->AddParameter(parameter);
num_parameters++;
+ if (num_parameters > kMaxNumFunctionParameters) {
+ ReportMessageAt(scanner().location(), "too_many_parameters",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
done = (peek() == Token::RPAREN);
if (!done) Expect(Token::COMMA, CHECK_OK);
}
@@ -3300,6 +3575,49 @@
end_pos = scanner().location().end_pos;
}
+ // Validate strict mode.
+ if (temp_scope_->StrictMode()) {
+ if (IsEvalOrArguments(name)) {
+ int position = function_token_position != RelocInfo::kNoPosition
+ ? function_token_position
+ : (start_pos > 0 ? start_pos - 1 : start_pos);
+ Scanner::Location location = Scanner::Location(position, start_pos);
+ ReportMessageAt(location,
+ "strict_function_name", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ if (name_loc.IsValid()) {
+ ReportMessageAt(name_loc, "strict_param_name",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ if (dupe_loc.IsValid()) {
+ ReportMessageAt(dupe_loc, "strict_param_dupe",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ if (name_is_reserved) {
+ int position = function_token_position != RelocInfo::kNoPosition
+ ? function_token_position
+ : (start_pos > 0 ? start_pos - 1 : start_pos);
+ Scanner::Location location = Scanner::Location(position, start_pos);
+ ReportMessageAt(location, "strict_reserved_word",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ if (reserved_loc.IsValid()) {
+ ReportMessageAt(reserved_loc, "strict_reserved_word",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
+ }
+
FunctionLiteral* function_literal =
new FunctionLiteral(name,
top_scope_,
@@ -3312,7 +3630,8 @@
start_pos,
end_pos,
function_name->length() > 0,
- temp_scope.ContainsLoops());
+ temp_scope.ContainsLoops(),
+ temp_scope.StrictMode());
function_literal->set_function_token_position(function_token_position);
if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
@@ -3367,6 +3686,13 @@
}
+bool Parser::peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD;
+}
+
+
void Parser::Consume(Token::Value token) {
Token::Value next = Next();
USE(next);
@@ -3426,7 +3752,22 @@
Handle<String> Parser::ParseIdentifier(bool* ok) {
- Expect(Token::IDENTIFIER, ok);
+ bool is_reserved;
+ return ParseIdentifierOrReservedWord(&is_reserved, ok);
+}
+
+
+Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
+ bool* ok) {
+ *is_reserved = false;
+ if (temp_scope_->StrictMode()) {
+ Expect(Token::IDENTIFIER, ok);
+ } else {
+ if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_RESERVED_WORD, ok);
+ *is_reserved = true;
+ }
+ }
if (!*ok) return Handle<String>();
return GetSymbol(ok);
}
@@ -3434,7 +3775,9 @@
Handle<String> Parser::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
- if (next != Token::IDENTIFIER && !Token::IsKeyword(next)) {
+ if (next != Token::IDENTIFIER &&
+ next != Token::FUTURE_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
ReportUnexpectedToken(next);
*ok = false;
return Handle<String>();
@@ -3443,21 +3786,49 @@
}
+// Checks LHS expression for assignment and prefix/postfix increment/decrement
+// in strict mode.
+void Parser::CheckStrictModeLValue(Expression* expression,
+ const char* error,
+ bool* ok) {
+ ASSERT(temp_scope_->StrictMode());
+ VariableProxy* lhs = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+
+ if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
+ ReportMessage(error, Vector<const char*>::empty());
+ *ok = false;
+ }
+}
+
+
+// Checks whether the last octal literal seen is between beg_pos and end_pos.
+// If so, reports an error.
+void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ int octal = scanner().octal_position();
+ if (beg_pos <= octal && octal <= end_pos) {
+ ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
+ Vector<const char*>::empty());
+ scanner().clear_octal_position();
+ *ok = false;
+ }
+}
+
+
// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
bool* ok) {
- Expect(Token::IDENTIFIER, ok);
+ Handle<String> result = ParseIdentifier(ok);
if (!*ok) return Handle<String>();
if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
const char* token = scanner().literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
- return GetSymbol(ok);
+ return result;
}
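
CheckOctalLiteral above leans on a small trick: the scanner only remembers the position of the most recent octal literal, and the parser probes that single position against a source range once it knows the surrounding code is strict, clearing it so enclosing ranges do not report the same literal twice. A sketch of that deferral, with ScannerStub standing in for V8's scanner:

    // ScannerStub stands in for the scanner's octal_position bookkeeping.
    struct ScannerStub {
      int octal_position;  // -1 means no octal literal has been seen.
    };

    // Returns false if an octal literal was scanned inside [beg_pos, end_pos];
    // the caller reports strict_octal_literal at that position. Clearing the
    // position keeps enclosing checks from reporting the literal again.
    bool CheckOctalLiteral(ScannerStub* scanner, int beg_pos, int end_pos) {
      int octal = scanner->octal_position;
      if (beg_pos <= octal && octal <= end_pos) {
        scanner->octal_position = -1;
        return false;
      }
      return true;
    }
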
@@ -3552,16 +3923,15 @@
Handle<String> type,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
- Handle<JSArray> array = Factory::NewJSArray(argc, TENURED);
- ASSERT(array->IsJSArray() && array->HasFastElements());
+ Handle<FixedArray> elements = Factory::NewFixedArray(argc, TENURED);
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
- // We know this doesn't cause a GC here because we allocated the JSArray
- // large enough.
- array->SetFastElement(i, *element)->ToObjectUnchecked();
+ elements->set(i, *element);
}
}
+ Handle<JSArray> array = Factory::NewJSArrayWithElements(elements, TENURED);
+
ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
args->Add(new Literal(type));
args->Add(new Literal(array));
@@ -3599,6 +3969,7 @@
message = "unexpected_token_string";
break;
case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD:
message = "unexpected_token_identifier";
break;
default:
@@ -3645,16 +4016,10 @@
Handle<Object> JsonParser::ParseJsonValue() {
Token::Value token = scanner_.Next();
switch (token) {
- case Token::STRING: {
+ case Token::STRING:
return GetString();
- }
- case Token::NUMBER: {
- ASSERT(scanner_.is_literal_ascii());
- double value = StringToDouble(scanner_.literal_ascii_string(),
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- return Factory::NewNumber(value);
- }
+ case Token::NUMBER:
+ return Factory::NewNumber(scanner_.number());
case Token::FALSE_LITERAL:
return Factory::false_value();
case Token::TRUE_LITERAL:
@@ -3696,6 +4061,11 @@
uint32_t index;
if (key->AsArrayIndex(&index)) {
SetOwnElement(json_object, index, value);
+ } else if (key->Equals(Heap::Proto_symbol())) {
+ // We can't remove the __proto__ accessor since it's hardcoded
+ // in several places. Instead, treat the value as the prototype
+ // of the created object where possible.
+ SetPrototype(json_object, value);
} else {
SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
}
@@ -4728,7 +5098,9 @@
ASSERT(Top::has_pending_exception());
} else {
Handle<String> source = Handle<String>(String::cast(script->source()));
- result = parser.ParseProgram(source, info->is_global());
+ result = parser.ParseProgram(source,
+ info->is_global(),
+ info->StrictMode());
}
}
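
A note on the "use strict" recognition added to ParseSourceElements above: a directive must be a plain string-literal expression statement, and the token must span exactly the string's length plus the two quote characters, which rejects escaped spellings such as "use\u0020strict". A self-contained sketch of that check over a pre-tokenized body (Stmt is a stand-in, not a V8 type):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for what the parser sees per statement of the prologue.
    struct Stmt {
      bool is_string_literal_expression;  // ExpressionStatement over a Literal.
      std::string value;                  // Decoded string value.
      int beg_pos, end_pos;               // Source positions of the token.
    };

    // Returns true if the directive prologue of `body` enables strict mode.
    // Mirrors the ES5 14.1 check: the literal must span value.size() + 2
    // source characters, i.e. contain no escape sequences.
    bool ScanDirectivePrologue(const std::vector<Stmt>& body) {
      for (const Stmt& stmt : body) {
        if (!stmt.is_string_literal_expression) break;  // Prologue ends here.
        if (stmt.value == "use strict" &&
            stmt.end_pos - stmt.beg_pos ==
                static_cast<int>(stmt.value.size()) + 2) {
          return true;
        }
      }
      return false;
    }

    int main() {
      std::vector<Stmt> body = {{true, "use strict", 0, 12}};
      std::printf("%s\n", ScanDirectivePrologue(body) ? "strict" : "sloppy");
      return 0;
    }
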
diff --git a/src/parser.h b/src/parser.h
index 0613a8d..dfd909a 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -423,7 +423,8 @@
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Handle<String> source,
- bool in_global_context);
+ bool in_global_context,
+ StrictModeFlag strict_mode);
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
@@ -435,6 +436,11 @@
Vector<Handle<String> > args);
protected:
+ // Limit on number of function parameters is chosen arbitrarily.
+ // Code::Flags uses only the low 17 bits of num-parameters to
+ // construct a hashable id, so if more than 2^17 are allowed, this
+ // should be checked.
+ static const int kMaxNumFunctionParameters = 32766;
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
UC16CharacterStream* source,
ZoneScope* zone_scope);
@@ -446,6 +452,7 @@
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
+ StrictModeFlag strict_mode,
ZoneScope* zone_scope);
// Report syntax error
@@ -546,6 +553,7 @@
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
+ bool name_is_reserved,
int function_token_position,
FunctionLiteralType type,
bool* ok);
@@ -575,6 +583,8 @@
return scanner().Next();
}
+ bool peek_any_identifier();
+
INLINE(void Consume(Token::Value token));
void Expect(Token::Value token, bool* ok);
bool Check(Token::Value token);
@@ -608,11 +618,20 @@
Literal* GetLiteralNumber(double value);
Handle<String> ParseIdentifier(bool* ok);
+ Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
Handle<String> ParseIdentifierName(bool* ok);
Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
bool* ok);
+ // Strict mode validation of LValue expressions
+ void CheckStrictModeLValue(Expression* expression,
+ const char* error,
+ bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
// Parser support
VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
FunctionLiteral* fun,
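
The kMaxNumFunctionParameters comment above ties the limit to the 17-bit num-parameters field in Code::Flags, and that relationship is cheap to machine-check. A sketch, taking the 17-bit figure on faith from the comment (kNumParametersBits is illustrative, not an existing V8 constant):

    // Guard the invariant stated in parser.h: the parameter limit must fit
    // in the low 17 bits Code::Flags uses to build a hashable id.
    static const int kMaxNumFunctionParameters = 32766;
    static const int kNumParametersBits = 17;  // Assumed from the comment.
    static_assert(kMaxNumFunctionParameters < (1 << kNumParametersBits),
                  "kMaxNumFunctionParameters must fit the Code::Flags field");
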
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index ad1e499..c18049f 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -215,6 +215,7 @@
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
@@ -222,6 +223,19 @@
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
@@ -416,7 +430,7 @@
Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
- set_names(name);
+ set_name(name);
}
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 25a9ca0..4a474b4 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -318,6 +318,7 @@
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
@@ -325,6 +326,19 @@
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index ce53305..ea35c1b 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -196,6 +196,7 @@
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
@@ -203,6 +204,19 @@
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index f1b7695..49d3dd9 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -242,6 +242,12 @@
}
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
UNIMPLEMENTED();
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 5de6081..0002dd7 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -213,6 +213,7 @@
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
@@ -220,6 +221,19 @@
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index ab5c0a3..256dc75 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -122,6 +122,11 @@
}
+bool OS::Remove(const char* path) {
+ return (remove(path) == 0);
+}
+
+
const char* OS::LogFileOpenMode = "w";
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index dc4493a..556e26b 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -226,6 +226,7 @@
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
@@ -233,6 +234,19 @@
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index bf1737a..b5a85f6 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -670,6 +670,11 @@
}
+bool OS::Remove(const char* path) {
+ return (DeleteFileA(path) != 0);
+}
+
+
// Open log file in binary mode to avoid \n -> \r\n conversion.
const char* OS::LogFileOpenMode = "wb";
@@ -911,17 +916,44 @@
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
- Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
- : file_(file), file_mapping_(file_mapping), memory_(memory) { }
+ Win32MemoryMappedFile(HANDLE file,
+ HANDLE file_mapping,
+ void* memory,
+ int size)
+ : file_(file),
+ file_mapping_(file_mapping),
+ memory_(memory),
+ size_(size) { }
virtual ~Win32MemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
HANDLE file_;
HANDLE file_mapping_;
void* memory_;
+ int size_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ // Open a physical file
+ HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+ if (file == NULL) return NULL;
+
+ int size = static_cast<int>(GetFileSize(file, NULL));
+
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, NULL,
+ PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+ if (file_mapping == NULL) return NULL;
+
+ // Map a view of the file into memory
+ void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
// Open a physical file
@@ -935,7 +967,7 @@
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
if (memory) memmove(memory, initial, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
diff --git a/src/platform.h b/src/platform.h
index 7b17067..0d7d2e9 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -174,6 +174,7 @@
static int GetLastError();
static FILE* FOpen(const char* path, const char* mode);
+ static bool Remove(const char* path);
// Log file open mode is platform-dependent due to line ends issues.
static const char* LogFileOpenMode;
@@ -251,9 +252,11 @@
class MemoryMappedFile {
public:
+ static MemoryMappedFile* open(const char* name);
static MemoryMappedFile* create(const char* name, int size, void* initial);
virtual ~MemoryMappedFile() { }
virtual void* memory() = 0;
+ virtual int size() = 0;
};
// Safe formatting print. Ensures that str is always null-terminated.
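
All of the POSIX MemoryMappedFile::open implementations added above share one fopen/fseek/ftell/mmap recipe and do not check ftell or mmap for failure; note also that fopen's "w+" mode truncates an existing file to zero length. A standalone sketch of the same recipe with the failure paths handled (this illustrates the pattern, it is not the V8 code; "r+" is used here to avoid the truncation):

    #include <cstdio>
    #include <sys/mman.h>

    // Map an existing file read-write. On success the caller owns both the
    // mapping and the FILE*; on failure returns NULL with nothing leaked.
    void* MapWholeFile(const char* name, FILE** out_file, long* out_size) {
      FILE* file = fopen(name, "r+");  // "w+" would truncate the file.
      if (file == NULL) return NULL;
      if (fseek(file, 0, SEEK_END) != 0) { fclose(file); return NULL; }
      long size = ftell(file);
      if (size <= 0) { fclose(file); return NULL; }
      void* memory =
          mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
      if (memory == MAP_FAILED) { fclose(file); return NULL; }
      *out_file = file;
      *out_size = size;
      return memory;
    }
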
diff --git a/src/preparser.cc b/src/preparser.cc
index c0dcc0b..252e88f 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -83,6 +83,7 @@
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_string", NULL);
case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_identifier", NULL);
default:
@@ -790,7 +791,7 @@
Expression result = kUnknownExpression;
if (peek() == i::Token::FUNCTION) {
Consume(i::Token::FUNCTION);
- if (peek() == i::Token::IDENTIFIER) {
+ if (peek_any_identifier()) {
ParseIdentifier(CHECK_OK);
}
result = ParseFunctionLiteral(CHECK_OK);
@@ -858,7 +859,8 @@
break;
}
- case i::Token::IDENTIFIER: {
+ case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD: {
ParseIdentifier(CHECK_OK);
result = kIdentifierExpression;
break;
@@ -946,7 +948,8 @@
while (peek() != i::Token::RBRACE) {
i::Token::Value next = peek();
switch (next) {
- case i::Token::IDENTIFIER: {
+ case i::Token::IDENTIFIER:
+ case i::Token::FUTURE_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
@@ -954,6 +957,7 @@
i::Token::Value name = Next();
bool is_keyword = i::Token::IsKeyword(name);
if (name != i::Token::IDENTIFIER &&
+ name != i::Token::FUTURE_RESERVED_WORD &&
name != i::Token::NUMBER &&
name != i::Token::STRING &&
!is_keyword) {
@@ -1151,7 +1155,9 @@
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- Expect(i::Token::IDENTIFIER, ok);
+ if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
+ Expect(i::Token::IDENTIFIER, ok);
+ }
if (!*ok) return kUnknownIdentifier;
return GetIdentifierSymbol();
}
@@ -1166,7 +1172,8 @@
i::StrLength(keyword)));
return kUnknownExpression;
}
- if (next == i::Token::IDENTIFIER) {
+ if (next == i::Token::IDENTIFIER ||
+ next == i::Token::FUTURE_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1175,19 +1182,23 @@
// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'. The reason for not using ParseIdentifier and
-// checking on the output is that this involves heap allocation which
-// we can't do during preparsing.
+// is 'get' or 'set'.
PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
bool* is_set,
bool* ok) {
- Expect(i::Token::IDENTIFIER, CHECK_OK);
+ PreParser::Identifier result = ParseIdentifier(CHECK_OK);
if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
const char* token = scanner_->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
- return GetIdentifierSymbol();
+ return result;
+}
+
+bool PreParser::peek_any_identifier() {
+ i::Token::Value next = peek();
+ return next == i::Token::IDENTIFIER ||
+ next == i::Token::FUTURE_RESERVED_WORD;
}
#undef CHECK_OK
diff --git a/src/preparser.h b/src/preparser.h
index 66fad3b..b7fa6c7 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -243,6 +243,8 @@
return scanner_->Next();
}
+ bool peek_any_identifier();
+
void Consume(i::Token::Value token) { Next(); }
void Expect(i::Token::Value token, bool* ok) {
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 211f3f6..dda7abb 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -297,13 +297,13 @@
Print("parameter[%d]", node->index());
break;
case Slot::LOCAL:
- Print("frame[%d]", node->index());
+ Print("local[%d]", node->index());
break;
case Slot::CONTEXT:
- Print(".context[%d]", node->index());
+ Print("context[%d]", node->index());
break;
case Slot::LOOKUP:
- Print(".context[");
+ Print("lookup[");
PrintLiteral(node->var()->name(), false);
Print("]");
break;
@@ -999,24 +999,7 @@
void AstPrinter::VisitSlot(Slot* node) {
PrintIndented("SLOT ");
- switch (node->type()) {
- case Slot::PARAMETER:
- Print("parameter[%d]", node->index());
- break;
- case Slot::LOCAL:
- Print("frame[%d]", node->index());
- break;
- case Slot::CONTEXT:
- Print(".context[%d]", node->index());
- break;
- case Slot::LOOKUP:
- Print(".context[");
- PrintLiteral(node->var()->name(), false);
- Print("]");
- break;
- default:
- UNREACHABLE();
- }
+ PrettyPrinter::VisitSlot(node);
Print("\n");
}
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 4476cb8..06ee333 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2285,7 +2285,7 @@
// The algorithm is based on the article:
// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
-// Softw. Pract. Exper. 4 (2001), pp. 1–10.
+// Softw. Pract. Exper. 4 (2001), pp. 1-10.
bool HeapSnapshotGenerator::BuildDominatorTree(
const Vector<HeapEntry*>& entries,
Vector<HeapEntry*>* dominators) {
diff --git a/src/regexp.js b/src/regexp.js
index 0de66c6..5b7e3a9 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -52,7 +52,7 @@
var multiline = false;
for (var i = 0; i < flags.length; i++) {
- var c = StringCharAt.call(flags, i);
+ var c = %_CallFunction(flags, i, StringCharAt);
switch (c) {
case 'g':
// Allow duplicate flags to be consistent with JSC and others.
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index d7792ac..3406cdc 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -193,22 +193,9 @@
if (maybe_check_code->ToObject(&check_code)) {
Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
Code* unoptimized_code = shared->code();
- // Iterate the unoptimized code and patch every stack check except at
- // the function entry. This code assumes the function entry stack
- // check appears first i.e., is not deferred or otherwise reordered.
- bool first = true;
- for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
- !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->target_address() == Code::cast(check_code)->entry()) {
- if (first) {
- first = false;
- } else {
- Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
- }
- }
- }
+ Deoptimizer::PatchStackCheckCode(unoptimized_code,
+ Code::cast(check_code),
+ replacement_code);
}
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 2f1f54c..ef7a4ac 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -644,6 +644,90 @@
}
+static bool CheckAccessException(LookupResult* result,
+ v8::AccessType access_type) {
+ if (result->type() == CALLBACKS) {
+ Object* callback = result->GetCallbackObject();
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ bool can_access =
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ return can_access;
+ }
+ }
+
+ return false;
+}
+
+
+static bool CheckAccess(JSObject* obj,
+ String* name,
+ LookupResult* result,
+ v8::AccessType access_type) {
+ ASSERT(result->IsProperty());
+
+ JSObject* holder = result->holder();
+ JSObject* current = obj;
+ while (true) {
+ if (current->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(current, name, access_type)) {
+ // Access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (currently, see v8::AccessControl).
+ break;
+ }
+
+ if (current == holder) {
+ return true;
+ }
+
+ current = JSObject::cast(current->GetPrototype());
+ }
+
+ // API callbacks can have per callback access exceptions.
+ switch (result->type()) {
+ case CALLBACKS: {
+ if (CheckAccessException(result, access_type)) {
+ return true;
+ }
+ break;
+ }
+ case INTERCEPTOR: {
+ // If the object has an interceptor, try real named properties.
+ // Overwrite the result to fetch the correct property later.
+ holder->LookupRealNamedProperty(name, result);
+ if (result->IsProperty()) {
+ if (CheckAccessException(result, access_type)) {
+ return true;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ Top::ReportFailedAccessCheck(current, access_type);
+ return false;
+}
+
+
+// TODO(1095): we should traverse the hidden prototype hierarchy as well.
+static bool CheckElementAccess(JSObject* obj,
+ uint32_t index,
+ v8::AccessType access_type) {
+ if (obj->IsAccessCheckNeeded() &&
+ !Top::MayIndexedAccess(obj, index, access_type)) {
+ return false;
+ }
+
+ return true;
+}
+
+
// Enumerator used as indices into the array returned from GetOwnProperty
enum PropertyDescriptorIndices {
IS_ACCESSOR_INDEX,
@@ -686,7 +770,7 @@
// subsequent cases.
Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+ Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
elms->set(VALUE_INDEX, *substr);
@@ -699,8 +783,7 @@
case JSObject::INTERCEPTED_ELEMENT:
case JSObject::FAST_ELEMENT: {
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- Handle<Object> element = GetElement(Handle<Object>(obj), index);
- elms->set(VALUE_INDEX, *element);
+ elms->set(VALUE_INDEX, *GetElement(obj, index));
elms->set(WRITABLE_INDEX, Heap::true_value());
elms->set(ENUMERABLE_INDEX, Heap::true_value());
elms->set(CONFIGURABLE_INDEX, Heap::true_value());
@@ -708,7 +791,14 @@
}
case JSObject::DICTIONARY_ELEMENT: {
- NumberDictionary* dictionary = obj->element_dictionary();
+ Handle<JSObject> holder = obj;
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return Heap::undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ holder = Handle<JSObject>(JSObject::cast(proto));
+ }
+ NumberDictionary* dictionary = holder->element_dictionary();
int entry = dictionary->FindEntry(index);
ASSERT(entry != NumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -718,14 +808,18 @@
FixedArray* callbacks =
FixedArray::cast(dictionary->ValueAt(entry));
elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
- elms->set(GETTER_INDEX, callbacks->get(0));
- elms->set(SETTER_INDEX, callbacks->get(1));
+ if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, callbacks->get(0));
+ }
+ if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, callbacks->get(1));
+ }
break;
}
case NORMAL:
// This is a data property.
elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+ elms->set(VALUE_INDEX, *GetElement(obj, index));
elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
break;
default:
@@ -745,35 +839,41 @@
if (!result.IsProperty()) {
return Heap::undefined_value();
}
- if (result.type() == CALLBACKS) {
- Object* structure = result.GetCallbackObject();
- if (structure->IsProxy() || structure->IsAccessorInfo()) {
- // Property that is internally implemented as a callback or
- // an API defined callback.
- Object* value;
- { MaybeObject* maybe_value = obj->GetPropertyWithCallback(
- *obj, structure, *name, result.holder());
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, value);
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
- } else if (structure->IsFixedArray()) {
- // __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
- elms->set(GETTER_INDEX, FixedArray::cast(structure)->get(0));
- elms->set(SETTER_INDEX, FixedArray::cast(structure)->get(1));
- } else {
- return Heap::undefined_value();
- }
- } else {
- elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
- elms->set(VALUE_INDEX, result.GetLazyValue());
- elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
+
+ if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
+ return Heap::false_value();
}
elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
+
+ bool is_js_accessor = (result.type() == CALLBACKS) &&
+ (result.GetCallbackObject()->IsFixedArray());
+
+ if (is_js_accessor) {
+ // __defineGetter__/__defineSetter__ callback.
+ elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+
+ FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+ if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, structure->get(0));
+ }
+ if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, structure->get(1));
+ }
+ } else {
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
+
+ PropertyAttributes attrs;
+ Object* value;
+ // GetProperty will check access and report any violations.
+ { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
+ if (!maybe_value->ToObject(&value)) return maybe_value;
+ }
+ elms->set(VALUE_INDEX, value);
+ }
+
return *desc;
}
@@ -784,10 +884,17 @@
return obj->PreventExtensions();
}
+
static MaybeObject* Runtime_IsExtensible(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
- return obj->map()->is_extensible() ? Heap::true_value()
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return Heap::false_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSObject::cast(proto);
+ }
+ return obj->map()->is_extensible() ? Heap::true_value()
: Heap::false_value();
}
@@ -982,7 +1089,7 @@
const char* type = (lookup.IsReadOnly()) ? "const" : "var";
return ThrowRedeclarationError(type, name);
}
- SetProperty(global, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
} else {
// If a property with this name does not already exist on the
// global object add the property locally. We take special
@@ -990,10 +1097,12 @@
// of callbacks in the prototype chain (this rules out using
// SetProperty). Also, we must use the handle-based version to
// avoid GC issues.
- SetLocalPropertyIgnoreAttributes(global, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(
+ SetLocalPropertyIgnoreAttributes(global, name, value, attributes));
}
}
+ ASSERT(!Top::has_pending_exception());
return Heap::undefined_value();
}
@@ -1043,12 +1152,14 @@
} else {
// The holder is an arguments object.
Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
- SetElement(arguments, index, initial_value);
+ Handle<Object> result = SetElement(arguments, index, initial_value);
+ if (result.is_null()) return Failure::Exception();
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
- SetProperty(context_ext, name, initial_value, mode);
+ RETURN_IF_EMPTY_HANDLE(
+ SetProperty(context_ext, name, initial_value, mode));
}
}
@@ -1075,8 +1186,7 @@
ASSERT(!context_ext->HasLocalProperty(*name));
Handle<Object> value(Heap::undefined_value());
if (*initial_value != NULL) value = initial_value;
- SetProperty(context_ext, name, value, mode);
- ASSERT(context_ext->GetLocalPropertyAttribute(*name) == mode);
+ RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode));
}
return Heap::undefined_value();
@@ -1225,12 +1335,12 @@
// with setting the value because the property is either absent or
// read-only. We also have to do redo the lookup.
HandleScope handle_scope;
- Handle<GlobalObject>global(Top::context()->global());
+ Handle<GlobalObject> global(Top::context()->global());
- // BUG 1213579: Handle the case where we have to set a read-only
+ // BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
- SetProperty(global, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
return *value;
}
@@ -1312,7 +1422,7 @@
// context.
if (attributes == ABSENT) {
Handle<JSObject> global = Handle<JSObject>(Top::context()->global());
- SetProperty(global, name, value, NONE);
+ RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, NONE));
return *value;
}
@@ -1349,14 +1459,8 @@
// The property was found in a different context extension object.
// Set it if it is not a read-only property.
if ((attributes & READ_ONLY) == 0) {
- Handle<Object> set = SetProperty(context_ext, name, value, attributes);
- // Setting a property might throw an exception. Exceptions
- // are converted to empty handles in handle operations. We
- // need to convert back to exceptions here.
- if (set.is_null()) {
- ASSERT(Top::has_pending_exception());
- return Failure::Exception();
- }
+ RETURN_IF_EMPTY_HANDLE(
+ SetProperty(context_ext, name, value, attributes));
}
}
@@ -3491,8 +3595,10 @@
HandleScope scope;
Handle<String> str = args.at<String>(0);
int index = Smi::cast(args[1])->value();
- Handle<Object> result = GetCharAt(str, index);
- return *result;
+ if (index >= 0 && index < str->length()) {
+ Handle<Object> result = GetCharAt(str, index);
+ return *result;
+ }
}
// Fall back to GetObjectProperty.
@@ -3500,7 +3606,12 @@
args.at<Object>(1));
}
-
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4b - define a new accessor property.
+// Steps 9c & 12 - replace an existing data property with an accessor property.
+// Step 12 - update an existing accessor property with an accessor or generic
+// descriptor.
static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
ASSERT(args.length() == 5);
HandleScope scope;
@@ -3532,6 +3643,12 @@
return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
}
+// Implements part of 8.12.9 DefineOwnProperty.
+// There are 3 cases that lead here:
+// Step 4a - define a new data property.
+// Steps 9b & 12 - replace an existing accessor property with a data property.
+// Step 12 - update an existing data property with a data or generic
+// descriptor.
static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
ASSERT(args.length() == 4);
HandleScope scope;
@@ -3555,12 +3672,20 @@
if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
is_element) {
// Normalize the elements to enable attributes on the property.
+ if (js_object->IsJSGlobalProxy()) {
+ Handle<Object> proto(js_object->GetPrototype());
+ // If proxy is detached, ignore the assignment. Alternatively,
+ // we could throw an exception.
+ if (proto->IsNull()) return *obj_value;
+ js_object = Handle<JSObject>::cast(proto);
+ }
NormalizeElements(js_object);
Handle<NumberDictionary> dictionary(js_object->element_dictionary());
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
NumberDictionarySet(dictionary, index, obj_value, details);
+ return *obj_value;
}
LookupResult result;
@@ -3575,6 +3700,11 @@
if (result.IsProperty() &&
(attr != result.GetAttributes() || result.type() == CALLBACKS)) {
// New attributes - normalize to avoid writing to instance descriptor
+ if (js_object->IsJSGlobalProxy()) {
+ // Since the result is a property, the prototype will exist so
+ // we don't have to check for null.
+ js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
+ }
NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
@@ -4082,6 +4212,14 @@
CONVERT_CHECKED(JSObject, raw_object, args[0]);
HandleScope scope;
Handle<JSObject> object(raw_object);
+
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype());
+ // If proxy is detached we simply return an empty array.
+ if (proto->IsNull()) return *Factory::NewJSArray(0);
+ object = Handle<JSObject>::cast(proto);
+ }
+
Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
LOCAL_ONLY);
// Some fast paths through GetKeysInFixedArrayFor reuse a cached
@@ -4171,7 +4309,7 @@
ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
+ if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
}
@@ -6564,28 +6702,50 @@
return *result;
}
+
static MaybeObject* Runtime_NewObjectFromBound(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
+ // First argument is a function to use as a constructor.
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_ARG_CHECKED(JSArray, params, 1);
- RUNTIME_ASSERT(params->HasFastElements());
- FixedArray* fixed = FixedArray::cast(params->elements());
+ // Second argument is either null or an array of bound arguments.
+ FixedArray* bound_args = NULL;
+ int bound_argc = 0;
+ if (!args[1]->IsNull()) {
+ CONVERT_ARG_CHECKED(JSArray, params, 1);
+ RUNTIME_ASSERT(params->HasFastElements());
+ bound_args = FixedArray::cast(params->elements());
+ bound_argc = Smi::cast(params->length())->value();
+ }
- int fixed_length = Smi::cast(params->length())->value();
- SmartPointer<Object**> param_data(NewArray<Object**>(fixed_length));
- for (int i = 0; i < fixed_length; i++) {
- Handle<Object> val = Handle<Object>(fixed->get(i));
+ // Find frame containing arguments passed to the caller.
+ JavaScriptFrameIterator it;
+ JavaScriptFrame* frame = it.frame();
+ ASSERT(!frame->is_optimized());
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+ int argc = frame->GetProvidedParametersCount();
+
+ // Prepend bound arguments to caller's arguments.
+ int total_argc = bound_argc + argc;
+ SmartPointer<Object**> param_data(NewArray<Object**>(total_argc));
+ for (int i = 0; i < bound_argc; i++) {
+ Handle<Object> val = Handle<Object>(bound_args->get(i));
param_data[i] = val.location();
}
+ for (int i = 0; i < argc; i++) {
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+ param_data[bound_argc + i] = val.location();
+ }
bool exception = false;
- Handle<Object> result = Execution::New(
- function, fixed_length, *param_data, &exception);
+ Handle<Object> result =
+ Execution::New(function, total_argc, *param_data, &exception);
if (exception) {
return Failure::Exception();
}
+
ASSERT(!result.is_null());
return *result;
}
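
Illustrative JavaScript for the path above, assuming the ES5 Function.prototype.bind semantics this runtime call backs: the bound arguments are prepended to the arguments supplied at the new-expression site.

    function Point(x, y) { this.x = x; this.y = y; }
    var Partial = Point.bind(null, 1);  // bound_argc == 1
    var p = new Partial(2);             // caller argc == 1, total_argc == 2
    // p.x === 1 && p.y === 2
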
@@ -6893,7 +7053,7 @@
// the AST id matching the PC.
Address start = unoptimized->instruction_start();
unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_start();
+ Address table_cursor = start + unoptimized->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(table_cursor);
table_cursor += kIntSize;
for (unsigned i = 0; i < table_length; ++i) {
@@ -6944,15 +7104,9 @@
Handle<Code> check_code = check_stub.GetCode();
Handle<Code> replacement_code(
Builtins::builtin(Builtins::OnStackReplacement));
- // Iterate the unoptimized code and revert all the patched stack checks.
- for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
- !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->target_address() == replacement_code->entry()) {
- Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
- }
- }
+ Deoptimizer::RevertStackCheckCode(*unoptimized,
+ *check_code,
+ *replacement_code);
// Allow OSR only at nesting level zero again.
unoptimized->set_allow_osr_at_loop_nesting_level(0);
@@ -7049,7 +7203,7 @@
}
-static MaybeObject* Runtime_LookupContext(Arguments args) {
+static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
@@ -7059,16 +7213,31 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
- if (index < 0 && !holder.is_null()) {
- ASSERT(holder->IsJSObject());
- return *holder;
+ // If the slot was not found the result is true.
+ if (holder.is_null()) {
+ return Heap::true_value();
}
- // No intermediate context found. Use global object by default.
- return Top::context()->global();
+ // If the slot was found in a context, it should be DONT_DELETE.
+ if (holder->IsContext()) {
+ return Heap::false_value();
+ }
+
+ // The slot was found in a JSObject, either a context extension object,
+ // the global object, or an arguments object. Try to delete it
+ // (respecting DONT_DELETE). For consistency with V8's usual behavior,
+ // which allows deleting all parameters in functions that mention
+ // 'arguments', we do this even for the case of slots found on an
+ // arguments object. The slot was found on an arguments object if the
+ // index is non-negative.
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ if (index >= 0) {
+ return object->DeleteElement(index, JSObject::NORMAL_DELETION);
+ } else {
+ return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
+ }
}
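
A sketch of the observable semantics (sloppy-mode JavaScript; illustrative only):

    function f() {
      var x = 1;
      eval("var y = 2");  // y lands on the context extension object
      var a = delete x;   // false: x is a DONT_DELETE context slot
      var b = delete y;   // true: deletable from the extension object
      var c = delete zz;  // true: the slot is not found anywhere
      return [a, b, c];   // [false, true, true]
    }
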
@@ -7141,8 +7310,7 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
// If the index is non-negative, the slot has been found in a local
// variable or a parameter. Read it from the context object or the
@@ -7209,19 +7377,23 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
if (index >= 0) {
if (holder->IsContext()) {
// Ignore if read_only variable.
if ((attributes & READ_ONLY) == 0) {
- Handle<Context>::cast(holder)->set(index, *value);
+ // Context is a fixed array and set cannot fail.
+ Context::cast(*holder)->set(index, *value);
}
} else {
ASSERT((attributes & READ_ONLY) == 0);
- Handle<JSObject>::cast(holder)->SetElement(index, *value)->
- ToObjectUnchecked();
+ Handle<Object> result =
+ SetElement(Handle<JSObject>::cast(holder), index, value);
+ if (result.is_null()) {
+ ASSERT(Top::has_pending_exception());
+ return Failure::Exception();
+ }
}
return *value;
}
@@ -7244,14 +7416,7 @@
// extension object itself.
if ((attributes & READ_ONLY) == 0 ||
(context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
- Handle<Object> set = SetProperty(context_ext, name, value, NONE);
- if (set.is_null()) {
- // Failure::Exception is converted to a null handle in the
- // handle-based methods such as SetProperty. We therefore need
- // to convert null handles back to exceptions.
- ASSERT(Top::has_pending_exception());
- return Failure::Exception();
- }
+ RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, NONE));
}
return *value;
}
@@ -7550,7 +7715,8 @@
Handle<Context> context(Top::context()->global_context());
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
context,
- true);
+ true,
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
@@ -7559,13 +7725,15 @@
static ObjectPair CompileGlobalEval(Handle<String> source,
- Handle<Object> receiver) {
+ Handle<Object> receiver,
+ StrictModeFlag mode) {
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(Top::context()),
- Top::context()->IsGlobalContext());
+ Top::context()->IsGlobalContext(),
+ mode);
if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
shared,
@@ -7576,7 +7744,7 @@
static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
@@ -7640,12 +7808,16 @@
return MakePair(*callee, Top::context()->global()->global_receiver());
}
- return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+ ASSERT(args[3]->IsSmi());
+ return CompileGlobalEval(args.at<String>(1),
+ args.at<Object>(2),
+ static_cast<StrictModeFlag>(
+ Smi::cast(args[3])->value()));
}
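
A hedged sketch of why the caller's strict mode must now travel with the eval resolution: per ES5 10.4.2, a direct eval in strict code gets its own variable environment.

    "use strict";
    eval("var z = 1;");
    typeof z;  // "undefined": z stayed local to the eval code
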
static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(Arguments args) {
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
@@ -7660,7 +7832,11 @@
return MakePair(*callee, Top::context()->global()->global_receiver());
}
- return CompileGlobalEval(args.at<String>(1), args.at<Object>(2));
+ ASSERT(args[3]->IsSmi());
+ return CompileGlobalEval(args.at<String>(1),
+ args.at<Object>(2),
+ static_cast<StrictModeFlag>(
+ Smi::cast(args[3])->value()));
}
@@ -8720,7 +8896,7 @@
// If we are inspecting an optimized frame use undefined as the
// value for all locals.
//
- // TODO(3141533): We should be able to get the correct values
+ // TODO(1140): We should be able to get the correct values
// for locals in optimized frames.
locals->set(i * 2 + 1, Heap::undefined_value());
} else if (i < info.number_of_stack_slots()) {
@@ -8884,7 +9060,7 @@
// Copy all the context locals into an object used to materialize a scope.
-static void CopyContextLocalsToScopeObject(
+static bool CopyContextLocalsToScopeObject(
Handle<SerializedScopeInfo> serialized_scope_info,
ScopeInfo<>& scope_info,
Handle<Context> context,
@@ -8898,11 +9074,15 @@
// Don't include the arguments shadow (.arguments) context variable.
if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
- SetProperty(scope_object,
- scope_info.context_slot_name(i),
- Handle<Object>(context->get(context_index)), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(scope_object,
+ scope_info.context_slot_name(i),
+ Handle<Object>(context->get(context_index)), NONE),
+ false);
}
}
+
+ return true;
}
@@ -8920,23 +9100,29 @@
// First fill all parameters.
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
- SetProperty(local_scope,
- scope_info.parameter_name(i),
- Handle<Object>(frame->GetParameter(i)), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(local_scope,
+ scope_info.parameter_name(i),
+ Handle<Object>(frame->GetParameter(i)), NONE),
+ Handle<JSObject>());
}
// Second fill all stack locals.
for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
- SetProperty(local_scope,
- scope_info.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i)), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(local_scope,
+ scope_info.stack_slot_name(i),
+ Handle<Object>(frame->GetExpression(i)), NONE),
+ Handle<JSObject>());
}
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
- CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
- function_context, local_scope);
+ if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ function_context, local_scope)) {
+ return Handle<JSObject>();
+ }
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
@@ -8949,7 +9135,9 @@
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- SetProperty(local_scope, key, GetProperty(ext, key), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(local_scope, key, GetProperty(ext, key), NONE),
+ Handle<JSObject>());
}
}
}
@@ -8982,16 +9170,20 @@
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
// We don't expect exception-throwing getters on the arguments shadow.
Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
- SetProperty(closure_scope,
- scope_info.parameter_name(i),
- Handle<Object>(element),
- NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(closure_scope,
+ scope_info.parameter_name(i),
+ Handle<Object>(element),
+ NONE),
+ Handle<JSObject>());
}
}
// Fill all context locals to the context extension.
- CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
- context, closure_scope);
+ if (!CopyContextLocalsToScopeObject(serialized_scope_info, scope_info,
+ context, closure_scope)) {
+ return Handle<JSObject>();
+ }
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
@@ -9002,7 +9194,9 @@
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- SetProperty(closure_scope, key, GetProperty(ext, key), NONE);
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ SetProperty(closure_scope, key, GetProperty(ext, key), NONE),
+ Handle<JSObject>());
}
}
@@ -9289,6 +9483,7 @@
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
Handle<JSObject> scope_object = it.ScopeObject();
+ RETURN_IF_EMPTY_HANDLE(scope_object);
details->set(kScopeDetailsObjectIndex, *scope_object);
return *Factory::NewJSArrayWithElements(details);
@@ -9770,6 +9965,7 @@
// Materialize the content of the local scope into a JSObject.
Handle<JSObject> local_scope = MaterializeLocalScope(frame);
+ RETURN_IF_EMPTY_HANDLE(local_scope);
// Allocate a new context for the debug evaluation and set the extension
// object build.
@@ -9797,10 +9993,14 @@
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
source_str_length));
+
+ // Currently, the eval code is always executed in non-strict mode,
+ // even in a strict code context.
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(function_source,
context,
- context->IsGlobalContext());
+ context->IsGlobalContext(),
+ kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Factory::NewFunctionFromSharedFunctionInfo(shared, context);
@@ -9882,10 +10082,10 @@
}
// Compile the source to be evaluated.
+ // Currently, the eval code is always executed in non-strict mode,
+ // even in a strict code context.
Handle<SharedFunctionInfo> shared =
- Compiler::CompileEval(source,
- context,
- is_global);
+ Compiler::CompileEval(source, context, is_global, kNonStrictMode);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
@@ -10745,6 +10945,45 @@
return *value;
}
+
+static MaybeObject* Runtime_NewMessageObject(Arguments args) {
+ HandleScope scope;
+ CONVERT_ARG_CHECKED(String, type, 0);
+ CONVERT_ARG_CHECKED(JSArray, arguments, 1);
+ return *Factory::NewJSMessageObject(type,
+ arguments,
+ 0,
+ 0,
+ Factory::undefined_value(),
+ Factory::undefined_value(),
+ Factory::undefined_value());
+}
+
+
+static MaybeObject* Runtime_MessageGetType(Arguments args) {
+ CONVERT_CHECKED(JSMessageObject, message, args[0]);
+ return message->type();
+}
+
+
+static MaybeObject* Runtime_MessageGetArguments(Arguments args) {
+ CONVERT_CHECKED(JSMessageObject, message, args[0]);
+ return message->arguments();
+}
+
+
+static MaybeObject* Runtime_MessageGetStartPosition(Arguments args) {
+ CONVERT_CHECKED(JSMessageObject, message, args[0]);
+ return Smi::FromInt(message->start_position());
+}
+
+
+static MaybeObject* Runtime_MessageGetScript(Arguments args) {
+ CONVERT_CHECKED(JSMessageObject, message, args[0]);
+ return message->script();
+}
+
+
#ifdef DEBUG
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.
diff --git a/src/runtime.h b/src/runtime.h
index dbd8d64..fb2ff93 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -237,8 +237,8 @@
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 3, 2) \
- F(ResolvePossiblyDirectEvalNoLookup, 3, 2) \
+ F(ResolvePossiblyDirectEval, 4, 2) \
+ F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
@@ -284,7 +284,7 @@
F(NewContext, 1, 1) \
F(PushContext, 1, 1) \
F(PushCatchContext, 1, 1) \
- F(LookupContext, 2, 1) \
+ F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
F(StoreContextSlot, 3, 1) \
@@ -310,6 +310,13 @@
/* Cache support */ \
F(GetFromCache, 2, 1) \
\
+ /* Message objects */ \
+ F(NewMessageObject, 2, 1) \
+ F(MessageGetType, 1, 1) \
+ F(MessageGetArguments, 1, 1) \
+ F(MessageGetStartPosition, 1, 1) \
+ F(MessageGetScript, 1, 1) \
+ \
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index e79dcff..d2ec54c 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,7 @@
#include "safepoint-table.h"
+#include "deoptimizer.h"
#include "disasm.h"
#include "macro-assembler.h"
@@ -57,7 +58,7 @@
SafepointTable::SafepointTable(Code* code) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_start();
+ Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
@@ -117,24 +118,9 @@
}
-Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
- int deoptimization_index) {
- ASSERT(deoptimization_index != -1);
- DeoptimizationInfo pc_and_deoptimization_index;
- pc_and_deoptimization_index.pc = assembler->pc_offset();
- pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
- pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
- pc_and_deoptimization_index.arguments = 0;
- pc_and_deoptimization_index.has_doubles = false;
- deoptimization_info_.Add(pc_and_deoptimization_index);
- indexes_.Add(new ZoneList<int>(8));
- registers_.Add(NULL);
- return Safepoint(indexes_.last(), registers_.last());
-}
-
-
-Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
- Assembler* assembler, int arguments, int deoptimization_index) {
+Safepoint SafepointTableBuilder::DefineSafepoint(
+ Assembler* assembler, Safepoint::Kind kind, int arguments,
+ int deoptimization_index) {
ASSERT(deoptimization_index != -1);
ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index;
@@ -142,30 +128,16 @@
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
pc_and_deoptimization_index.arguments = arguments;
- pc_and_deoptimization_index.has_doubles = false;
+ pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
- registers_.Add(new ZoneList<int>(4));
+ registers_.Add((kind & Safepoint::kWithRegisters)
+ ? new ZoneList<int>(4)
+ : NULL);
return Safepoint(indexes_.last(), registers_.last());
}
-Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
- Assembler* assembler, int arguments, int deoptimization_index) {
- ASSERT(deoptimization_index != -1);
- ASSERT(arguments >= 0);
- DeoptimizationInfo pc_and_deoptimization_index;
- pc_and_deoptimization_index.pc = assembler->pc_offset();
- pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
- pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
- pc_and_deoptimization_index.arguments = arguments;
- pc_and_deoptimization_index.has_doubles = true;
- deoptimization_info_.Add(pc_and_deoptimization_index);
- indexes_.Add(new ZoneList<int>(8));
- registers_.Add(new ZoneList<int>(4));
- return Safepoint(indexes_.last(), registers_.last());
-}
-
unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_);
return offset_;
@@ -173,6 +145,14 @@
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
+ while (assembler->pc_offset() < target_offset) {
+ assembler->nop();
+ }
+
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
@@ -250,4 +230,24 @@
}
+int SafepointTableBuilder::CountShortDeoptimizationIntervals(unsigned limit) {
+ int result = 0;
+ if (!deoptimization_info_.is_empty()) {
+ unsigned previous_gap_end = deoptimization_info_[0].pc_after_gap;
+ for (int i = 1, n = deoptimization_info_.length(); i < n; i++) {
+ DeoptimizationInfo info = deoptimization_info_[i];
+ if (static_cast<int>(info.deoptimization_index) !=
+ Safepoint::kNoDeoptimizationIndex) {
+ if (previous_gap_end + limit > info.pc) {
+ result++;
+ }
+ previous_gap_end = info.pc_after_gap;
+ }
+ }
+ }
+ return result;
+}
+
+
+
} } // namespace v8::internal
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index d703051..8803d06 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -180,6 +180,13 @@
class Safepoint BASE_EMBEDDED {
public:
+ typedef enum {
+ kSimple = 0,
+ kWithRegisters = 1 << 0,
+ kWithDoubles = 1 << 1,
+ kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
+ } Kind;
+
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
@@ -208,30 +215,13 @@
unsigned GetCodeOffset() const;
// Define a new safepoint for the current position in the body.
- Safepoint DefineSafepoint(
- Assembler* assembler,
- int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+ Safepoint DefineSafepoint(Assembler* assembler,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
- // Define a new safepoint with registers on the stack for the
- // current position in the body and take the number of arguments on
- // top of the registers into account.
- Safepoint DefineSafepointWithRegisters(
- Assembler* assembler,
- int arguments,
- int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
- // Define a new safepoint with all double registers and the normal
- // registers on the stack for the current position in the body and
- // take the number of arguments on top of the registers into account.
- // TODO(1043) Rewrite the three SafepointTableBuilder::DefineSafepoint
- // methods to one method that uses template arguments.
- Safepoint DefineSafepointWithRegistersAndDoubles(
- Assembler* assembler,
- int arguments,
- int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
- // Update the last safepoint with the size of the code generated for the gap
- // following it.
+ // Update the last safepoint with the size of the code generated until the
+ // end of the gap following it.
void SetPcAfterGap(int pc) {
ASSERT(!deoptimization_info_.is_empty());
int index = deoptimization_info_.length() - 1;
@@ -242,6 +232,11 @@
// entry must be enough to hold all the pointer indexes.
void Emit(Assembler* assembler, int bits_per_entry);
+ // Count the number of deoptimization points where the next
+ // deoptimization point begins less than limit bytes after
+ // the end of this point's gap.
+ int CountShortDeoptimizationIntervals(unsigned limit);
+
private:
struct DeoptimizationInfo {
unsigned pc;
@@ -257,8 +252,8 @@
ZoneList<ZoneList<int>*> indexes_;
ZoneList<ZoneList<int>*> registers_;
- bool emitted_;
unsigned offset_;
+ bool emitted_;
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 997fb31..80bca4e 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -64,7 +64,8 @@
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner() { }
+Scanner::Scanner()
+ : octal_pos_(kNoOctalLocation) { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -99,7 +100,8 @@
// ECMA-262. Other JS VMs support them.
uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
uc32 x = c - '0';
- for (int i = 0; i < length; i++) {
+ int i = 0;
+ for (; i < length; i++) {
int d = c0_ - '0';
if (d < 0 || d > 7) break;
int nx = x * 8 + d;
@@ -107,6 +109,12 @@
x = nx;
Advance();
}
+ // Anything except '\0' is an octal escape sequence, illegal in strict mode.
+ // Remember the position of octal escape sequences so that a better error
+ // can be reported later (in strict mode).
+ if (c != '0' || i > 0) {
+ octal_pos_ = source_pos() - i - 1; // Already advanced
+ }
return x;
}
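
Illustrative only: the remembered position lets the parser reject octal escapes once the enclosing code turns out to be strict. A small probe (the helper name is made up):

    function isStrictLegal(src) {
      try { Function('"use strict"; ' + src); return true; }
      catch (e) { return false; }
    }
    isStrictLegal('var s = "\\101";');  // false: octal escape sequence
    isStrictLegal('var s = "\\0";');    // true: a lone \0 is not octal
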
@@ -601,7 +609,11 @@
kind = DECIMAL;
break;
}
- if (c0_ < '0' || '7' < c0_) break;
+ if (c0_ < '0' || '7' < c0_) {
+ // Octal literal finished.
+ octal_pos_ = next_.location.beg_pos;
+ break;
+ }
AddLiteralCharAdvance();
}
}
@@ -784,25 +796,27 @@
{ "break", KEYWORD_PREFIX, Token::BREAK },
{ NULL, C, Token::ILLEGAL },
{ NULL, D, Token::ILLEGAL },
- { "else", KEYWORD_PREFIX, Token::ELSE },
+ { NULL, E, Token::ILLEGAL },
{ NULL, F, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, I, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, N, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, P, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ "return", KEYWORD_PREFIX, Token::RETURN },
- { "switch", KEYWORD_PREFIX, Token::SWITCH },
+ { NULL, S, Token::ILLEGAL },
{ NULL, T, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, V, Token::ILLEGAL },
- { NULL, W, Token::ILLEGAL }
+ { NULL, W, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
};
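
With this table change the scanner recognizes ES5's future reserved words (section 7.6.1.2) instead of passing them through as identifiers. A rough probe (helper made up; the strict-only words are hedged, since the parser decides what to do with the token):

    function parses(src) {
      try { Function(src); return true; } catch (e) { return false; }
    }
    parses("var enum = 1;");  // false: 'enum' is reserved in all code
    parses("var let = 1;");   // expected true in sloppy ES5 code; 'let'
                              // is reserved only in strict mode
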
@@ -810,7 +824,7 @@
switch (state_) {
case INITIAL: {
// Matching the first character is the only state with significant fanout.
- // Match only lower-case letters in range 'b'..'w'.
+ // Match only lower-case letters in range 'b'..'y'.
unsigned int offset = input - kFirstCharRangeMin;
if (offset < kFirstCharRangeLength) {
state_ = first_states_[offset].state;
@@ -838,6 +852,8 @@
break;
case C:
if (MatchState(input, 'a', CA)) return;
+ if (MatchKeywordStart(input, "class", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'o', CO)) return;
break;
case CA:
@@ -860,6 +876,18 @@
if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
break;
+ case E:
+ if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
+ if (MatchKeywordStart(input, "enum", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchState(input, 'x', EX)) return;
+ break;
+ case EX:
+ if (MatchKeywordStart(input, "export", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "extends", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
case F:
if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
@@ -868,10 +896,22 @@
break;
case I:
if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+ if (MatchState(input, 'm', IM)) return;
if (MatchKeyword(input, 'n', IN, Token::IN)) return;
break;
+ case IM:
+ if (MatchState(input, 'p', IMP)) return;
+ break;
+ case IMP:
+ if (MatchKeywordStart(input, "implements", 3,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "import", 3,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
case IN:
token_ = Token::IDENTIFIER;
+ if (MatchKeywordStart(input, "interface", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
break;
case N:
@@ -879,6 +919,27 @@
if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
break;
+ case P:
+ if (MatchKeywordStart(input, "package", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchState(input, 'r', PR)) return;
+ if (MatchKeywordStart(input, "public", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
+ case PR:
+ if (MatchKeywordStart(input, "private", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "protected", 2,
+ Token::FUTURE_RESERVED_WORD)) return;
+ break;
+ case S:
+ if (MatchKeywordStart(input, "static", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "super", 1,
+ Token::FUTURE_RESERVED_WORD)) return;
+ if (MatchKeywordStart(input, "switch", 1,
+ Token::SWITCH)) return;
+ break;
case T:
if (MatchState(input, 'h', TH)) return;
if (MatchState(input, 'r', TR)) return;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 1024ad1..f5fe7f7 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -247,6 +247,9 @@
// Generic functionality used by both JSON and JavaScript scanners.
class Scanner {
public:
+ // -1 is outside of the range of any real source code.
+ static const int kNoOctalLocation = -1;
+
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
class LiteralScope {
@@ -271,15 +274,28 @@
struct Location {
Location(int b, int e) : beg_pos(b), end_pos(e) { }
Location() : beg_pos(0), end_pos(0) { }
+
+ bool IsValid() const {
+ return beg_pos >= 0 && end_pos >= beg_pos;
+ }
+
int beg_pos;
int end_pos;
};
+ static Location NoLocation() {
+ return Location(-1, -1);
+ }
+
// Returns the location information for the current token
// (the token returned by Next()).
Location location() const { return current_.location; }
Location peek_location() const { return next_.location; }
+ // Returns the location of the last seen octal literal
+ int octal_position() const { return octal_pos_; }
+ void clear_octal_position() { octal_pos_ = -1; }
+
// Returns the literal string, if any, for the current token (the
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; it may contain 0-characters. Literal strings are
@@ -393,6 +409,8 @@
}
uc32 ScanHexEscape(uc32 c, int length);
+
+ // Scans an octal escape sequence. Also accepts the "\0" decimal escape
+ // sequence.
uc32 ScanOctalEscape(uc32 c, int length);
// Return the current source position.
@@ -410,6 +428,8 @@
// Input stream. Must be initialized to an UC16CharacterStream.
UC16CharacterStream* source_;
+ // Start position of the octal literal last scanned.
+ int octal_pos_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
@@ -544,10 +564,17 @@
CON,
D,
DE,
+ E,
+ EX,
F,
I,
+ IM,
+ IMP,
IN,
N,
+ P,
+ PR,
+ S,
T,
TH,
TR,
@@ -563,7 +590,7 @@
// Range of possible first characters of a keyword.
static const unsigned int kFirstCharRangeMin = 'b';
- static const unsigned int kFirstCharRangeMax = 'w';
+ static const unsigned int kFirstCharRangeMax = 'y';
static const unsigned int kFirstCharRangeLength =
kFirstCharRangeMax - kFirstCharRangeMin + 1;
// State map for first keyword character range.
diff --git a/src/scanner.cc b/src/scanner.cc
index b66d10b..d54d9f9 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -76,7 +76,8 @@
buffer_end_ = buffer_ + kBufferSize;
buffer_cursor_ = buffer_end_;
}
- ASSERT(pushback_limit_ > buffer_);
+ // Ensure that there is room for at least one pushback.
+ ASSERT(buffer_cursor_ > buffer_);
ASSERT(pos_ > 0);
buffer_[--buffer_cursor_ - buffer_] = character;
if (buffer_cursor_ == buffer_) {
@@ -89,15 +90,17 @@
bool BufferedUC16CharacterStream::ReadBlock() {
+ buffer_cursor_ = buffer_;
if (pushback_limit_ != NULL) {
- buffer_cursor_ = buffer_;
+ // Leave pushback mode.
buffer_end_ = pushback_limit_;
pushback_limit_ = NULL;
- ASSERT(buffer_cursor_ != buffer_end_);
- return true;
+ // If there were any valid characters left at the
+ // start of the buffer, use those.
+ if (buffer_cursor_ < buffer_end_) return true;
+ // Otherwise read a new block.
}
unsigned length = FillBuffer(pos_, kBufferSize);
- buffer_cursor_ = buffer_;
buffer_end_ = buffer_ + length;
return length > 0;
}
@@ -516,17 +519,30 @@
Token::Value JsonScanner::ScanJsonNumber() {
LiteralScope literal(this);
- if (c0_ == '-') AddLiteralCharAdvance();
+ bool negative = false;
+
+ if (c0_ == '-') {
+ AddLiteralCharAdvance();
+ negative = true;
+ }
if (c0_ == '0') {
AddLiteralCharAdvance();
// Prefix zero is only allowed if it's the only digit before
// a decimal point or exponent.
if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
} else {
+ int i = 0;
+ int digits = 0;
if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
do {
+ i = i * 10 + c0_ - '0';
+ digits++;
AddLiteralCharAdvance();
} while (c0_ >= '0' && c0_ <= '9');
+ if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+ number_ = (negative ? -i : i);
+ return Token::NUMBER;
+ }
}
if (c0_ == '.') {
AddLiteralCharAdvance();
@@ -544,6 +560,10 @@
} while (c0_ >= '0' && c0_ <= '9');
}
literal.Complete();
+ ASSERT_NOT_NULL(next_.literal_chars);
+ number_ = StringToDouble(next_.literal_chars->ascii_literal(),
+ NO_FLAGS, // Disallow hex, octal and trailing junk.
+ OS::nan_value());
return Token::NUMBER;
}
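
A sketch of the fast path added above: integers of up to nine digits are accumulated directly, while longer or fractional numbers still go through StringToDouble (illustrative only):

    JSON.parse("123456789");   // 9 digits: taken on the integer fast path
    JSON.parse("1234567890");  // 10 digits: falls back to StringToDouble
    JSON.parse("-42");         // sign handled via the 'negative' flag
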
diff --git a/src/scanner.h b/src/scanner.h
index d762182..cf2084f 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -148,6 +148,12 @@
// Returns the next token.
Token::Value Next();
+ // Returns the value of a number token.
+ double number() {
+ return number_;
+ }
+
+
protected:
// Skip past JSON whitespace (only space, tab, newline and carriage-return).
bool SkipJsonWhiteSpace();
@@ -178,6 +184,9 @@
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
+ // Holds the value of a scanned number token.
+ double number_;
};
} } // namespace v8::internal
diff --git a/src/scopes.cc b/src/scopes.cc
index d3f54ad..fd573b0 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -383,8 +383,7 @@
void Scope::SetIllegalRedeclaration(Expression* expression) {
- // Only set the illegal redeclaration expression the
- // first time the function is called.
+ // Record only the first illegal redeclaration.
if (!HasIllegalRedeclaration()) {
illegal_redecl_ = expression;
}
diff --git a/src/scopes.h b/src/scopes.h
index 09901ad..a9220eb 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -289,6 +289,17 @@
int ContextChainLength(Scope* scope);
// ---------------------------------------------------------------------------
+ // Strict mode support.
+ bool IsDeclared(Handle<String> name) {
+ // During formal parameter list parsing the scope only contains
+ // two variables inserted at initialization: "this" and "arguments".
+ // "this" is an invalid parameter name and "arguments" is invalid parameter
+ // name in strict mode. Therefore looking up with the map which includes
+ // "this" and "arguments" in addition to all formal parameters is safe.
+ return variables_.Lookup(name) != NULL;
+ }
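
Illustrative use, assuming the parser consults IsDeclared while scanning a strict-mode formal parameter list:

    function ok(a, b) { "use strict"; }      // distinct names: fine
    // function bad(a, a) { "use strict"; }  // SyntaxError: duplicate
    //                                       // parameter name
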
+
+ // ---------------------------------------------------------------------------
// Debugging.
#ifdef DEBUG
diff --git a/src/serialize.cc b/src/serialize.cc
index 6a6c6bb..f8e98d3 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -192,7 +192,7 @@
{ BUILTIN, \
Builtins::name, \
"Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, ignored)
+#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
BUILTIN_LIST_C(DEF_ENTRY_C)
BUILTIN_LIST_A(DEF_ENTRY_A)
@@ -335,7 +335,7 @@
Add(ExternalReference::delete_handle_scope_extensions().address(),
RUNTIME_ENTRY,
- 3,
+ 4,
"HandleScope::DeleteExtensions");
// Miscellaneous
@@ -504,7 +504,7 @@
"power_double_int_function");
Add(ExternalReference::arguments_marker_location().address(),
UNCLASSIFIED,
- 40,
+ 41,
"Factory::arguments_marker().location()");
}
diff --git a/src/spaces.cc b/src/spaces.cc
index fca1032..a586fbf 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
@@ -3125,6 +3126,8 @@
// Free the chunk.
MarkCompactCollector::ReportDeleteIfNeeded(object);
+ LiveObjectList::ProcessNonLive(object);
+
size_ -= static_cast<int>(chunk_size);
objects_size_ -= object->Size();
page_count_--;
diff --git a/src/string.js b/src/string.js
index ab0ab54..2b73e0f 100644
--- a/src/string.js
+++ b/src/string.js
@@ -103,17 +103,14 @@
// ECMA-262 section 15.5.4.7
function StringIndexOf(pattern /* position */) { // length == 1
var subject = TO_STRING_INLINE(this);
- var pattern = TO_STRING_INLINE(pattern);
- var subject_len = subject.length;
- var pattern_len = pattern.length;
+ pattern = TO_STRING_INLINE(pattern);
var index = 0;
if (%_ArgumentsLength() > 1) {
- var arg1 = %_Arguments(1); // position
- index = TO_INTEGER(arg1);
+ index = %_Arguments(1); // position
+ index = TO_INTEGER(index);
+ if (index < 0) index = 0;
+ if (index > subject.length) index = subject.length;
}
- if (index < 0) index = 0;
- if (index > subject_len) index = subject_len;
- if (pattern_len + index > subject_len) return -1;
return %StringIndexOf(subject, pattern, index);
}
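
Behavioral sketch of the rewritten builtin; clamping now happens only when a position argument is actually present, and the results are unchanged:

    "abc".indexOf("b", -5);  // 1: negative positions clamp to 0
    "abc".indexOf("b", 99);  // -1: positions clamp to subject.length
    "abc".indexOf("");       // 0: no position argument, index stays 0
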
@@ -405,8 +402,7 @@
lastMatchInfoOverride = override;
var func_result =
%_CallFunction(receiver, elem, match_start, subject, replace);
- func_result = TO_STRING_INLINE(func_result);
- res[i] = func_result;
+ res[i] = TO_STRING_INLINE(func_result);
match_start += elem.length;
}
i++;
@@ -419,8 +415,7 @@
// Use the apply argument as backing for global RegExp properties.
lastMatchInfoOverride = elem;
var func_result = replace.apply(null, elem);
- func_result = TO_STRING_INLINE(func_result);
- res[i] = func_result;
+ res[i] = TO_STRING_INLINE(func_result);
}
i++;
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 295cc4a..f87728b 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -441,6 +441,12 @@
MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+ // Using NORMAL as the PropertyType for array element loads is a misuse. The
+ // generated stub always accesses fast elements, not slow-mode fields, but
+ // some property type is required for the stub lookup. Note that overloading
+ // the NORMAL PropertyType is only safe as long as no stubs are generated for
+ // other keyed field loads. This is guaranteed to be the case since all field
+ // keyed loads that are not array elements go through a generic builtin stub.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
String* name = Heap::KeyedLoadSpecialized_symbol();
@@ -461,15 +467,44 @@
}
+MaybeObject* StubCache::ComputeKeyedLoadPixelArray(JSObject* receiver) {
+ // Using NORMAL as the PropertyType for array element loads is a misuse. The
+ // generated stub always accesses fast elements, not slow-mode fields, but
+ // some property type is required for the stub lookup. Note that overloading
+ // the NORMAL PropertyType is only safe as long as no stubs are generated for
+ // other keyed field loads. This is guaranteed to be the case since all field
+ // keyed loads that are not array elements go through a generic builtin stub.
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+ String* name = Heap::KeyedLoadPixelArray_symbol();
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ KeyedLoadStubCompiler compiler;
+ { MaybeObject* maybe_code = compiler.CompileLoadPixelArray(receiver);
+ if (!maybe_code->ToObject(&code)) return maybe_code;
+ }
+ PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+ Object* result;
+ { MaybeObject* maybe_result =
+ receiver->UpdateMapCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ }
+ return code;
+}
+
+
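
Illustrative only (assumes a canvas 2D context named ctx): keyed loads on a pixel array are what the new stub specializes.

    var data = ctx.getImageData(0, 0, 1, 1).data;  // pixel array
    var v = data[0];  // keyed load through the specialized stub
    data[0] = 255;    // a keyed store; handled separately from this stub
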
MaybeObject* StubCache::ComputeStoreField(String* name,
JSObject* receiver,
int field_index,
- Map* transition) {
+ Map* transition,
+ Code::ExtraICState extra_ic_state) {
PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, type, extra_ic_state);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- StoreStubCompiler compiler;
+ StoreStubCompiler compiler(extra_ic_state);
{ MaybeObject* maybe_code =
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -507,18 +542,90 @@
}
-MaybeObject* StubCache::ComputeStoreNormal() {
- return Builtins::builtin(Builtins::StoreIC_Normal);
+namespace {
+
+ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
+ switch (kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ return kExternalByteArray;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return kExternalUnsignedByteArray;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ return kExternalShortArray;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return kExternalUnsignedShortArray;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ return kExternalIntArray;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ return kExternalUnsignedIntArray;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ return kExternalFloatArray;
+ default:
+ UNREACHABLE();
+ return static_cast<ExternalArrayType>(0);
+ }
+}
+
+} // anonymous namespace
+
+
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
+ JSObject* receiver,
+ bool is_store) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(
+ is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
+ NORMAL);
+ ExternalArrayType array_type =
+ ElementsKindToExternalArrayType(receiver->GetElementsKind());
+ String* name =
+ is_store ? Heap::KeyedStoreExternalArray_symbol()
+ : Heap::KeyedLoadExternalArray_symbol();
+ // Use the global maps for the particular external array types,
+ // rather than the receiver's map, when looking up the cached code,
+ // so that we actually canonicalize these stubs.
+ Map* map = Heap::MapForExternalArrayType(array_type);
+ Object* code = map->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ ExternalArrayStubCompiler compiler;
+ { MaybeObject* maybe_code =
+ is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
+ compiler.CompileKeyedLoadStub(array_type, flags);
+ if (!maybe_code->ToObject(&code)) return maybe_code;
+ }
+ if (is_store) {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+ } else {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+ }
+ Object* result;
+ { MaybeObject* maybe_result =
+ map->UpdateCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ }
+ return code;
+}
+
+
+MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) {
+ return Builtins::builtin(extra_ic_state == StoreIC::kStoreICStrict
+ ? Builtins::StoreIC_Normal_Strict
+ : Builtins::StoreIC_Normal);
}
MaybeObject* StubCache::ComputeStoreGlobal(String* name,
GlobalObject* receiver,
- JSGlobalPropertyCell* cell) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
+ JSGlobalPropertyCell* cell,
+ Code::ExtraICState extra_ic_state) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, NORMAL, extra_ic_state);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- StoreStubCompiler compiler;
+ StoreStubCompiler compiler(extra_ic_state);
{ MaybeObject* maybe_code =
compiler.CompileStoreGlobal(receiver, cell, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -535,14 +642,17 @@
}
-MaybeObject* StubCache::ComputeStoreCallback(String* name,
- JSObject* receiver,
- AccessorInfo* callback) {
+MaybeObject* StubCache::ComputeStoreCallback(
+ String* name,
+ JSObject* receiver,
+ AccessorInfo* callback,
+ Code::ExtraICState extra_ic_state) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, CALLBACKS);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, CALLBACKS, extra_ic_state);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- StoreStubCompiler compiler;
+ StoreStubCompiler compiler(extra_ic_state);
{ MaybeObject* maybe_code =
compiler.CompileStoreCallback(receiver, callback, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -559,13 +669,15 @@
}
-MaybeObject* StubCache::ComputeStoreInterceptor(String* name,
- JSObject* receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::STORE_IC, INTERCEPTOR);
+MaybeObject* StubCache::ComputeStoreInterceptor(
+ String* name,
+ JSObject* receiver,
+ Code::ExtraICState extra_ic_state) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, INTERCEPTOR, extra_ic_state);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- StoreStubCompiler compiler;
+ StoreStubCompiler compiler(extra_ic_state);
{ MaybeObject* maybe_code =
compiler.CompileStoreInterceptor(receiver, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -1536,7 +1648,8 @@
MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type,
+ extra_ic_state_);
MaybeObject* result = GetCodeWithFlags(flags, name);
if (!result->IsFailure()) {
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
@@ -1709,4 +1822,16 @@
}
+MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
+ Object* result;
+ { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Code* code = Code::cast(result);
+ USE(code);
+ PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
+ return result;
+}
+
+
} } // namespace v8::internal
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 85dd5f6..307939d 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -133,28 +133,37 @@
MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
JSObject* receiver);
+ MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadPixelArray(
+ JSObject* receiver);
+
// ---
- MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition = NULL);
+ MUST_USE_RESULT static MaybeObject* ComputeStoreField(
+ String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition,
+ Code::ExtraICState extra_ic_state);
- MUST_USE_RESULT static MaybeObject* ComputeStoreNormal();
+ MUST_USE_RESULT static MaybeObject* ComputeStoreNormal(
+ Code::ExtraICState extra_ic_state);
MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal(
String* name,
GlobalObject* receiver,
- JSGlobalPropertyCell* cell);
+ JSGlobalPropertyCell* cell,
+ Code::ExtraICState extra_ic_state);
MUST_USE_RESULT static MaybeObject* ComputeStoreCallback(
String* name,
JSObject* receiver,
- AccessorInfo* callback);
+ AccessorInfo* callback,
+ Code::ExtraICState extra_ic_state);
MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor(
String* name,
- JSObject* receiver);
+ JSObject* receiver,
+ Code::ExtraICState extra_ic_state);
// ---
@@ -167,6 +176,10 @@
MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
JSObject* receiver);
+ MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+ JSObject* receiver,
+ bool is_store);
+
// ---
MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
@@ -423,7 +436,8 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss_label);
+ Label* miss_label,
+ bool support_wrappers);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
@@ -497,17 +511,16 @@
String* name,
Label* miss);
- bool GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure);
+ MaybeObject* GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,
@@ -603,6 +616,7 @@
MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileLoadPixelArray(JSObject* receiver);
private:
MaybeObject* GetCode(PropertyType type, String* name);
@@ -611,6 +625,9 @@
class StoreStubCompiler: public StubCompiler {
public:
+ explicit StoreStubCompiler(Code::ExtraICState extra_ic_state)
+ : extra_ic_state_(extra_ic_state) { }
+
MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -628,6 +645,8 @@
private:
MaybeObject* GetCode(PropertyType type, String* name);
+
+ Code::ExtraICState extra_ic_state_;
};
@@ -797,6 +816,20 @@
CallHandlerInfo* api_call_info_;
};
+class ExternalArrayStubCompiler: public StubCompiler {
+ public:
+ explicit ExternalArrayStubCompiler() {}
+
+ MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags);
+
+ MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags);
+
+ private:
+ MaybeObject* GetCode(Code::Flags flags);
+};
+
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_
diff --git a/src/third_party/strongtalk/LICENSE b/src/third_party/strongtalk/LICENSE
deleted file mode 100644
index 7473a7b..0000000
--- a/src/third_party/strongtalk/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (c) 1994-2006 Sun Microsystems Inc.
-All Rights Reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-- Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
-- Redistribution in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-- Neither the name of Sun Microsystems or the names of contributors may
- be used to endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/strongtalk/README.chromium b/src/third_party/strongtalk/README.chromium
deleted file mode 100644
index ba2b789..0000000
--- a/src/third_party/strongtalk/README.chromium
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Strongtalk
-URL: http://www.strongtalk.org/
-
-Code from the Strongtalk assembler is used with modification in the following
-files:
-
-src/assembler.h
-src/assembler.cc
-src/arm/assembler-arm.cc
-src/arm/assembler-arm.h
-src/arm/assembler-arm-inl.h
-src/ia32/assembler-ia32.cc
-src/ia32/assembler-ia32.h
-src/ia32/assembler-ia32-inl.h
-src/mips/assembler-mips.cc
-src/mips/assembler-mips.h
-src/mips/assembler-mips-inl.h
-src/x64/assembler-x64.h
diff --git a/src/token.h b/src/token.h
index fb890d2..776d9f3 100644
--- a/src/token.h
+++ b/src/token.h
@@ -155,38 +155,6 @@
K(WHILE, "while", 0) \
K(WITH, "with", 0) \
\
- /* Future reserved words (ECMA-262, section 7.5.3, page 14). */ \
- F(ABSTRACT, "abstract", 0) \
- F(BOOLEAN, "boolean", 0) \
- F(BYTE, "byte", 0) \
- F(CHAR, "char", 0) \
- F(CLASS, "class", 0) \
- K(CONST, "const", 0) \
- F(DOUBLE, "double", 0) \
- F(ENUM, "enum", 0) \
- F(EXPORT, "export", 0) \
- F(EXTENDS, "extends", 0) \
- F(FINAL, "final", 0) \
- F(FLOAT, "float", 0) \
- F(GOTO, "goto", 0) \
- F(IMPLEMENTS, "implements", 0) \
- F(IMPORT, "import", 0) \
- F(INT, "int", 0) \
- F(INTERFACE, "interface", 0) \
- F(LONG, "long", 0) \
- K(NATIVE, "native", 0) \
- F(PACKAGE, "package", 0) \
- F(PRIVATE, "private", 0) \
- F(PROTECTED, "protected", 0) \
- F(PUBLIC, "public", 0) \
- F(SHORT, "short", 0) \
- F(STATIC, "static", 0) \
- F(SUPER, "super", 0) \
- F(SYNCHRONIZED, "synchronized", 0) \
- F(THROWS, "throws", 0) \
- F(TRANSIENT, "transient", 0) \
- F(VOLATILE, "volatile", 0) \
- \
/* Literals (ECMA-262, section 7.8, page 16). */ \
K(NULL_LITERAL, "null", 0) \
K(TRUE_LITERAL, "true", 0) \
@@ -197,6 +165,11 @@
/* Identifiers (not keywords or future reserved words). */ \
T(IDENTIFIER, NULL, 0) \
\
+ /* Future reserved words (ECMA-262, section 7.6.1.2). */ \
+ T(FUTURE_RESERVED_WORD, NULL, 0) \
+ K(CONST, "const", 0) \
+ K(NATIVE, "native", 0) \
+ \
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
\
diff --git a/src/top.cc b/src/top.cc
index 98c673c..3364c0d 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -72,7 +72,7 @@
handler_ = 0;
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
- simulator_ = assembler::arm::Simulator::current();
+ simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
simulator_ = assembler::mips::Simulator::current();
#endif
@@ -380,16 +380,16 @@
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
- Handle<String> column_key = Factory::LookupAsciiSymbol("column");
- Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName");
+ Handle<String> column_key = Factory::LookupAsciiSymbol("column");
+ Handle<String> line_key = Factory::LookupAsciiSymbol("lineNumber");
+ Handle<String> script_key = Factory::LookupAsciiSymbol("scriptName");
Handle<String> name_or_source_url_key =
Factory::LookupAsciiSymbol("nameOrSourceURL");
Handle<String> script_name_or_source_url_key =
Factory::LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = Factory::LookupAsciiSymbol("functionName");
- Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval");
- Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor");
+ Handle<String> function_key = Factory::LookupAsciiSymbol("functionName");
+ Handle<String> eval_key = Factory::LookupAsciiSymbol("isEval");
+ Handle<String> constructor_key = Factory::LookupAsciiSymbol("isConstructor");
StackTraceFrameIterator it;
int frames_seen = 0;
@@ -421,16 +421,16 @@
// tag.
column_offset += script->column_offset()->value();
}
- SetProperty(stackFrame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
+ SetLocalPropertyNoThrow(stackFrame, column_key,
+ Handle<Smi>(Smi::FromInt(column_offset + 1)));
}
- SetProperty(stackFrame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
+ SetLocalPropertyNoThrow(stackFrame, line_key,
+ Handle<Smi>(Smi::FromInt(line_number + 1)));
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name());
- SetProperty(stackFrame, script_key, script_name, NONE);
+ SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
}
if (options & StackTrace::kScriptNameOrSourceURL) {
@@ -446,7 +446,8 @@
if (caught_exception) {
result = Factory::undefined_value();
}
- SetProperty(stackFrame, script_name_or_source_url_key, result, NONE);
+ SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+ result);
}
if (options & StackTrace::kFunctionName) {
@@ -454,20 +455,20 @@
if (fun_name->ToBoolean()->IsFalse()) {
fun_name = Handle<Object>(fun->shared()->inferred_name());
}
- SetProperty(stackFrame, function_key, fun_name, NONE);
+ SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
}
if (options & StackTrace::kIsEval) {
int type = Smi::cast(script->compilation_type())->value();
Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
Factory::true_value() : Factory::false_value();
- SetProperty(stackFrame, eval_key, is_eval, NONE);
+ SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frames[i].is_constructor()) ?
Factory::true_value() : Factory::false_value();
- SetProperty(stackFrame, constructor_key, is_constructor, NONE);
+ SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
}
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
@@ -806,7 +807,7 @@
}
-bool Top::ShouldReturnException(bool* is_caught_externally,
+bool Top::ShouldReportException(bool* is_caught_externally,
bool catchable_by_javascript) {
// Find the top-most try-catch handler.
StackHandler* handler =
@@ -847,15 +848,15 @@
Handle<Object> exception_handle(exception_object);
// Determine reporting and whether the exception is caught externally.
- bool is_caught_externally = false;
bool is_out_of_memory = exception == Failure::OutOfMemoryException();
bool is_termination_exception = exception == Heap::termination_exception();
bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
// Only real objects can be caught by JS.
ASSERT(!catchable_by_javascript || is_object);
- bool should_return_exception =
- ShouldReturnException(&is_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_return_exception;
+ bool is_caught_externally = false;
+ bool should_report_exception =
+ ShouldReportException(&is_caught_externally, catchable_by_javascript);
+ bool report_exception = catchable_by_javascript && should_report_exception;
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
@@ -1095,7 +1096,7 @@
// thread_local_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
- thread_local_.simulator_ = assembler::arm::Simulator::current();
+ thread_local_.simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
thread_local_.simulator_ = assembler::mips::Simulator::current();
#endif
diff --git a/src/top.h b/src/top.h
index e485de1..c052dad 100644
--- a/src/top.h
+++ b/src/top.h
@@ -32,15 +32,24 @@
#include "compilation-cache.h"
#include "frames-inl.h"
#include "runtime-profiler.h"
-#include "simulator.h"
namespace v8 {
namespace internal {
+class Simulator;
#define RETURN_IF_SCHEDULED_EXCEPTION() \
if (Top::has_scheduled_exception()) return Top::PromoteScheduledException()
+#define RETURN_IF_EMPTY_HANDLE_VALUE(call, value) \
+ if (call.is_null()) { \
+ ASSERT(Top::has_pending_exception()); \
+ return value; \
+ }
+
+#define RETURN_IF_EMPTY_HANDLE(call) \
+ RETURN_IF_EMPTY_HANDLE_VALUE(call, Failure::Exception())
+
// Top has static variables used for JavaScript execution.
class SaveContext; // Forward declaration.
@@ -109,7 +118,7 @@
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
- assembler::arm::Simulator* simulator_;
+ Simulator* simulator_;
#elif V8_TARGET_ARCH_MIPS
assembler::mips::Simulator* simulator_;
#endif
@@ -386,7 +395,9 @@
static void DoThrow(MaybeObject* exception,
MessageLocation* location,
const char* message);
- static bool ShouldReturnException(bool* is_caught_externally,
+ // Checks if the exception should be reported and finds out if it's
+ // caught externally.
+ static bool ShouldReportException(bool* is_caught_externally,
bool catchable_by_javascript);
// Attempts to compute the current source location, storing the
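A usage sketch for the two macros added to top.h above; the caller and helper signatures here are assumed for illustration, not taken from the tree. Any handle-returning call whose empty result signals a pending exception can now be guarded in one line.

// Hypothetical caller with assumed signatures -- illustrates the macros only.
MaybeObject* StoreFrameProperty(Handle<JSObject> frame,
                                Handle<String> key,
                                Handle<Object> value) {
  Handle<Object> result = SetFramePropertyHelper(frame, key, value);
  // An empty handle means an exception is pending; the macro bails out
  // with Failure::Exception() (or a caller-chosen value via the _VALUE form).
  RETURN_IF_EMPTY_HANDLE(result);
  return *result;
}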
diff --git a/src/type-info.cc b/src/type-info.cc
index f4f65e9..0bb7262 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -171,7 +171,7 @@
}
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
Handle<Object> object = GetElement(map_, expr->position());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
@@ -198,7 +198,7 @@
}
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
Handle<Object> object = GetElement(map_, expr->position());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
diff --git a/src/type-info.h b/src/type-info.h
index e026e88..34ff584 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -120,9 +120,9 @@
}
- // Integer32 is an integer that can be represented as either a signed
- // 32-bit integer or as an unsigned 32-bit integer. It has to be
- // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+ // Integer32 is an integer that can be represented as a signed
+ // 32-bit integer. It has to be
+ // in the range [-2^31, 2^31 - 1]. We also have to check for negative 0
// as it is not an Integer32.
static inline bool IsInt32Double(double value) {
const DoubleRepresentation minus_zero(-0.0);
@@ -236,12 +236,6 @@
class TypeFeedbackOracle BASE_EMBEDDED {
public:
- enum Side {
- LEFT,
- RIGHT,
- RESULT
- };
-
TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
bool LoadIsMonomorphic(Property* expr);
@@ -261,8 +255,8 @@
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// Get type information for arithmetic operations and compares.
- TypeInfo BinaryType(BinaryOperation* expr, Side side);
- TypeInfo CompareType(CompareOperation* expr, Side side);
+ TypeInfo BinaryType(BinaryOperation* expr);
+ TypeInfo CompareType(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
private:
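The corrected Integer32 comment above can be sanity-checked in isolation. This standalone snippet (not part of the patch) shows that 2^32 - 1 falls outside the signed 32-bit range while 2^31 - 1 does not:

#include <cstdint>
#include <cstdio>

int main() {
  double in_range = 2147483647.0;      // 2^31 - 1 == INT32_MAX
  double out_of_range = 4294967295.0;  // 2^32 - 1, fits uint32 but not int32
  printf("%d\n", static_cast<int32_t>(in_range) == INT32_MAX);    // 1
  printf("%d\n", out_of_range > static_cast<double>(INT32_MAX));  // 1
  return 0;
}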
diff --git a/src/uri.js b/src/uri.js
index 3adab83..e94b3fe 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -90,11 +90,13 @@
}
-function URIHexCharsToCharCode(ch1, ch2) {
- if (HexValueOf(ch1) == -1 || HexValueOf(ch2) == -1) {
+function URIHexCharsToCharCode(highChar, lowChar) {
+ var highCode = HexValueOf(highChar);
+ var lowCode = HexValueOf(lowChar);
+ if (highCode == -1 || lowCode == -1) {
throw new $URIError("URI malformed");
}
- return HexStrToCharCode(ch1 + ch2);
+ return (highCode << 4) | lowCode;
}
@@ -196,7 +198,7 @@
var ch = uri.charAt(k);
if (ch == '%') {
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
var n = 0;
while (((cc << ++n) & 0x80) != 0) ;
@@ -205,8 +207,8 @@
octets[0] = cc;
if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
for (var i = 1; i < n; i++) {
- k++;
- octets[i] = URIHexCharsToCharCode(uri.charAt(++k), uri.charAt(++k));
+ if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
+ octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
}
index = URIDecodeOctets(octets, result, index);
} else {
@@ -325,9 +327,7 @@
}
-function HexValueOf(c) {
- var code = c.charCodeAt(0);
-
+function HexValueOf(code) {
// 0-9
if (code >= 48 && code <= 57) return code - 48;
// A-F
@@ -356,18 +356,6 @@
}
-// Converts hex string to char code. Not efficient.
-function HexStrToCharCode(s) {
- var m = 0;
- var r = 0;
- for (var i = s.length - 1; i >= 0; --i) {
- r = r + (HexValueOf(s.charAt(i)) << m);
- m = m + 4;
- }
- return r;
-}
-
-
// Returns true if all digits in string s are valid hex numbers
function IsValidHex(s) {
for (var i = 0; i < s.length; ++i) {
@@ -412,4 +400,3 @@
}
SetupURI();
-
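The uri.js change above swaps string re-parsing (the removed HexStrToCharCode) for direct arithmetic on the two hex digit values. A C++ rendering of the same computation, mirroring the patched JS helpers:

// Mirrors HexValueOf/URIHexCharsToCharCode from the patch, in C++.
int HexValueOf(int code) {
  if (code >= '0' && code <= '9') return code - '0';       // 0-9
  if (code >= 'A' && code <= 'F') return code - 'A' + 10;  // A-F
  if (code >= 'a' && code <= 'f') return code - 'a' + 10;  // a-f
  return -1;
}

int HexCharsToCharCode(int high, int low) {
  int hi = HexValueOf(high);
  int lo = HexValueOf(low);
  if (hi == -1 || lo == -1) return -1;  // The JS version throws URIError here.
  return (hi << 4) | lo;                // e.g. '4','F' -> 0x4F == 79
}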
diff --git a/src/utils.cc b/src/utils.cc
index d0ec4ef..b466301 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -276,4 +276,96 @@
}
+MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(false) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::
+ MemoryMappedExternalResource(const char* filename,
+ bool remove_file_on_cleanup)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(remove_file_on_cleanup) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::~MemoryMappedExternalResource() {
+ // Release the resources if we had successfully acquired them:
+ if (file_ != NULL) {
+ delete file_;
+ if (remove_file_on_cleanup_) {
+ OS::Remove(filename_);
+ }
+ DeleteArray<char>(filename_);
+ }
+}
+
+
+void MemoryMappedExternalResource::Init(const char* filename) {
+ file_ = OS::MemoryMappedFile::open(filename);
+ if (file_ != NULL) {
+ filename_ = StrDup(filename);
+ data_ = reinterpret_cast<char*>(file_->memory());
+ length_ = file_->size();
+ }
+}
+
+
+bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
+ bool is_ascii = true;
+
+ int line_no = 1;
+ const char* start_of_line = data_;
+ const char* end = data_ + length_;
+ for (const char* p = data_; p < end; p++) {
+ char c = *p;
+ if ((c & 0x80) != 0) {
+ // Non-ascii detected:
+ is_ascii = false;
+
+ // Report the error and abort if appropriate:
+ if (abort_if_failed) {
+ int char_no = static_cast<int>(p - start_of_line) - 1;
+
+ ASSERT(filename_ != NULL);
+ PrintF("\n\n\n"
+ "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
+ c, filename_, line_no, char_no);
+
+ // Allow for some context up to kNumberOfLeadingContextChars chars
+ // before the offending non-ascii char to help the user see where
+ // the offending char is.
+ const int kNumberOfLeadingContextChars = 10;
+ const char* err_context = p - kNumberOfLeadingContextChars;
+ if (err_context < data_) {
+ err_context = data_;
+ }
+ // Compute the length of the error context and print it.
+ int err_context_length = static_cast<int>(p - err_context);
+ if (err_context_length != 0) {
+ PrintF(" after \"%.*s\"", err_context_length, err_context);
+ }
+ PrintF(".\n\n\n");
+ OS::Abort();
+ }
+
+ break; // Non-ascii detected. No need to continue scanning.
+ }
+ if (c == '\n') {
+ start_of_line = p;
+ line_no++;
+ }
+ }
+
+ return is_ascii;
+}
+
+
} } // namespace v8::internal
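A hedged embedding-side sketch of how the new MemoryMappedExternalResource might be used; the surrounding flow and the NewExternal hand-off are assumptions for illustration, only the class itself comes from this patch.

#include <v8.h>
#include "v8utils.h"  // Internal header declaring the class (see below).

// Assumed embedding-side flow.
void LoadScriptFromFile(const char* path) {
  v8::internal::MemoryMappedExternalResource* resource =
      new v8::internal::MemoryMappedExternalResource(path);
  if (!resource->exists() || resource->is_empty()) {
    delete resource;  // Mapping failed or file empty; fall back elsewhere.
    return;
  }
  // Aborts with file/line/char diagnostics on the first non-ASCII byte.
  resource->EnsureIsAscii();
  // Hand the mapped bytes to V8 as an external ASCII string (assumed API).
  v8::Local<v8::String> source = v8::String::NewExternal(resource);
  // ... compile and run `source` ...
  (void)source;
}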
diff --git a/src/utils.h b/src/utils.h
index 21e70d7..219343b 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -760,20 +760,32 @@
// you can use BitCast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
+
+// We need different implementations of BitCast for pointer and non-pointer
+// values. We use partial specialization of an auxiliary struct to work
+// around issues with overloading of template functions.
+template <class Dest, class Source>
+struct BitCastHelper {
+ STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
+
+ INLINE(static Dest cast(const Source& source)) {
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+ }
+};
+
+template <class Dest, class Source>
+struct BitCastHelper<Dest, Source*> {
+ INLINE(static Dest cast(Source* source)) {
+ return BitCastHelper<Dest, uintptr_t>::
+ cast(reinterpret_cast<uintptr_t>(source));
+ }
+};
+
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
- // Compile time assertion: sizeof(Dest) == sizeof(Source)
- // A compile error here means your Dest and Source have different sizes.
- typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
-
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
-
-template <class Dest, class Source>
-inline Dest BitCast(Source* source) {
- return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+ return BitCastHelper<Dest, Source>::cast(source);
}
} } // namespace v8::internal
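Stripped of the V8 macros, the pattern introduced above looks like this; a standalone sketch using standard C++ in place of STATIC_ASSERT/INLINE:

#include <cstdint>
#include <cstring>

// Generic case: same-sized value types are copied bit-for-bit.
template <class Dest, class Source>
struct BitCastHelper {
  static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
  static Dest cast(const Source& source) {
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }
};

// Pointer case: narrow any Source* to uintptr_t first, then delegate.
template <class Dest, class Source>
struct BitCastHelper<Dest, Source*> {
  static Dest cast(Source* source) {
    return BitCastHelper<Dest, uintptr_t>::cast(
        reinterpret_cast<uintptr_t>(source));
  }
};

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  return BitCastHelper<Dest, Source>::cast(source);
}

// Example: uint64_t bits = BitCast<uint64_t>(1.0);  // 0x3FF0000000000000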
diff --git a/src/v8-counters.h b/src/v8-counters.h
index aa30e4e..9b91ace 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -128,6 +128,7 @@
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
SC(map_slow_to_fast_elements, V8.MapSlowToFastElements) \
SC(map_fast_to_slow_elements, V8.MapFastToSlowElements) \
+ SC(map_to_pixel_array_elements, V8.MapToPixelArrayElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
diff --git a/src/v8.cc b/src/v8.cc
index f5b6150..0ff7649 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -34,7 +34,6 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "log.h"
-#include "oprofile-agent.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "simulator.h"
@@ -79,7 +78,7 @@
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM)
- ::assembler::arm::Simulator::Initialize();
+ Simulator::Initialize();
#elif defined(V8_TARGET_ARCH_MIPS)
::assembler::mips::Simulator::Initialize();
#endif
@@ -129,7 +128,6 @@
// objects in place for creating the code object used for probing.
CPU::Setup();
- OProfileAgent::Initialize();
Deoptimizer::Setup();
LAllocator::Setup();
RuntimeProfiler::Setup();
@@ -161,7 +159,6 @@
Logger::EnsureTickerStopped();
Deoptimizer::TearDown();
- OProfileAgent::TearDown();
if (FLAG_preemption) {
v8::Locker locker;
diff --git a/src/v8globals.h b/src/v8globals.h
index 3f27114..d11bc38 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -69,21 +69,21 @@
// Zap-value: The value used for zapping dead objects.
-// Should be a recognizable hex value tagged as a heap object pointer.
+// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
+ reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
const Address kHandleZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
+ reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
const Address kFromSpaceZapValue =
- reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
+ reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
-const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeed);
+const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
#else
-const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
-const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
-const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
-const uint32_t kSlotsZapValue = 0xbeefdeed;
+const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
+const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
+const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
+const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
#endif
@@ -469,6 +469,12 @@
ARMv7 = 2, // ARM
SAHF = 0}; // x86
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+enum StrictModeFlag {
+ kNonStrictMode,
+ kStrictMode
+};
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
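The zap-value change is easier to see in binary: in V8's tagging scheme of this era, heap object pointers carry low bits 01 and failures carry low bits 11, so moving the constants from ...ed/...ad endings to ...ef/...af retags them as failures, matching the rewritten comment. A standalone check:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kHeapObjectTag = 1;  // low two bits 01
  const uint32_t kFailureTag = 3;     // low two bits 11
  printf("%d\n", (0xdeadbeedu & 3) == kHeapObjectTag);  // 1: old tagging
  printf("%d\n", (0xdeadbeefu & 3) == kFailureTag);     // 1: new tagging
  return 0;
}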
diff --git a/src/v8natives.js b/src/v8natives.js
index 233f8b4..83b00b0 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -491,28 +491,29 @@
}
-
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
- var desc = new PropertyDescriptor();
-
- // GetOwnProperty returns an array indexed by the constants
- // defined in macros.py.
- // If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), ToString(p));
-
- if (IS_UNDEFINED(props)) return void 0;
-
- // This is an accessor
- if (props[IS_ACCESSOR_INDEX]) {
- desc.setGet(props[GETTER_INDEX]);
- desc.setSet(props[SETTER_INDEX]);
- } else {
- desc.setValue(props[VALUE_INDEX]);
- desc.setWritable(props[WRITABLE_INDEX]);
+// Converts an array returned from Runtime_GetOwnProperty to an actual
+// property descriptor. For a description of the array layout, please
+// see the runtime.cc file.
+function ConvertDescriptorArrayToDescriptor(desc_array) {
+ if (desc_array == false) {
+ throw 'Internal error: invalid desc_array';
}
- desc.setEnumerable(props[ENUMERABLE_INDEX]);
- desc.setConfigurable(props[CONFIGURABLE_INDEX]);
+
+ if (IS_UNDEFINED(desc_array)) {
+ return void 0;
+ }
+
+ var desc = new PropertyDescriptor();
+ // This is an accessor.
+ if (desc_array[IS_ACCESSOR_INDEX]) {
+ desc.setGet(desc_array[GETTER_INDEX]);
+ desc.setSet(desc_array[SETTER_INDEX]);
+ } else {
+ desc.setValue(desc_array[VALUE_INDEX]);
+ desc.setWritable(desc_array[WRITABLE_INDEX]);
+ }
+ desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
+ desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
return desc;
}
@@ -535,9 +536,27 @@
}
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+ // GetOwnProperty returns an array indexed by the constants
+ // defined in macros.py.
+ // If p is not a property on obj, undefined is returned.
+ var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+ // A false value here means that access checks failed.
+ if (props == false) return void 0;
+
+ return ConvertDescriptorArrayToDescriptor(props);
+}
+
+
// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
- var current = GetOwnProperty(obj, p);
+ var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
+ // A false value here means that access checks failed.
+ if (current_or_access == false) return void 0;
+
+ var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
// Error handling according to spec.
@@ -545,10 +564,12 @@
if (IS_UNDEFINED(current) && !extensible)
throw MakeTypeError("define_disallowed", ["defineProperty"]);
- if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+ if (!IS_UNDEFINED(current)) {
// Step 5 and 6
- if ((!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable() && current.isEnumerable())) &&
+ if ((IsGenericDescriptor(desc) ||
+ IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
+ (!desc.hasEnumerable() ||
+ SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
@@ -561,30 +582,36 @@
SameValue(desc.getSet(), current.getSet()))) {
return true;
}
-
- // Step 7
- if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- // Step 9
- if (IsDataDescriptor(current) != IsDataDescriptor(desc))
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- // Step 10
- if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable())
+ if (!current.isConfigurable()) {
+ // Step 7
+ if (desc.isConfigurable() ||
+ (desc.hasEnumerable() &&
+ desc.isEnumerable() != current.isEnumerable()))
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- if (!current.isWritable() && desc.hasValue() &&
- !SameValue(desc.getValue(), current.getValue())) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 8
+ if (!IsGenericDescriptor(desc)) {
+ // Step 9a
+ if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ // Step 10a
+ if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+ if (!current.isWritable() && desc.isWritable())
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ if (!current.isWritable() && desc.hasValue() &&
+ !SameValue(desc.getValue(), current.getValue())) {
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
+ }
+ // Step 11
+ if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+ if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
+ if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+ throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
}
}
- // Step 11
- if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- }
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
- }
}
// Send flags - enumerable and configurable are common - writable is
@@ -607,7 +634,16 @@
} else
flag |= DONT_DELETE;
- if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+ if (IsDataDescriptor(desc) ||
+ (IsGenericDescriptor(desc) &&
+ (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
+ // There are 3 cases that lead here:
+ // Step 4a - defining a new data property.
+ // Steps 9b & 12 - replacing an existing accessor property with a data
+ // property.
+ // Step 12 - updating an existing data property with a data or generic
+ // descriptor.
+
if (desc.hasWritable()) {
flag |= desc.isWritable() ? 0 : READ_ONLY;
} else if (!IS_UNDEFINED(current)) {
@@ -615,20 +651,30 @@
} else {
flag |= READ_ONLY;
}
+
var value = void 0; // Default value is undefined.
if (desc.hasValue()) {
value = desc.getValue();
- } else if (!IS_UNDEFINED(current)) {
+ } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
value = current.getValue();
}
+
%DefineOrRedefineDataProperty(obj, p, value, flag);
+ } else if (IsGenericDescriptor(desc)) {
+ // Step 12 - updating an existing accessor property with generic
+ // descriptor. Changing flags only.
+ %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
} else {
- if (desc.hasGetter() &&
- (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
- %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+ // There are 3 cases that lead here:
+ // Step 4b - defining a new accessor property.
+ // Steps 9c & 12 - replacing an existing data property with an accessor
+ // property.
+ // Step 12 - updating an existing accessor property with an accessor
+ // descriptor.
+ if (desc.hasGetter()) {
+ %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
}
- if (desc.hasSetter() &&
- (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
+ if (desc.hasSetter()) {
%DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
}
}
@@ -1107,33 +1153,49 @@
}
// this_arg is not an argument that should be bound.
var argc_bound = (%_ArgumentsLength() || 1) - 1;
- if (argc_bound > 0) {
+ var fn = this;
+ if (argc_bound == 0) {
+ var result = function() {
+ if (%_IsConstructCall()) {
+ // %NewObjectFromBound implicitly uses arguments passed to this
+ // function. We do not pass the arguments object explicitly to avoid
+ // materializing it and guarantee that this function will be optimized.
+ return %NewObjectFromBound(fn, null);
+ }
+
+ return fn.apply(this_arg, arguments);
+ };
+ } else {
var bound_args = new $Array(argc_bound);
for(var i = 0; i < argc_bound; i++) {
bound_args[i] = %_Arguments(i+1);
}
+
+ var result = function() {
+ // If this is a construct call we use a special runtime method
+ // to generate the actual object using the bound function.
+ if (%_IsConstructCall()) {
+ // %NewObjectFromBound implicitly uses arguments passed to this
+ // function. We do not pass the arguments object explicitly to avoid
+ // materializing it and guarantee that this function will be optimized.
+ return %NewObjectFromBound(fn, bound_args);
+ }
+
+ // Combine the args we got from the bind call with the args
+ // given as argument to the invocation.
+ var argc = %_ArgumentsLength();
+ var args = new $Array(argc + argc_bound);
+ // Add bound arguments.
+ for (var i = 0; i < argc_bound; i++) {
+ args[i] = bound_args[i];
+ }
+ // Add arguments from call.
+ for (var i = 0; i < argc; i++) {
+ args[argc_bound + i] = %_Arguments(i);
+ }
+ return fn.apply(this_arg, args);
+ };
}
- var fn = this;
- var result = function() {
- // Combine the args we got from the bind call with the args
- // given as argument to the invocation.
- var argc = %_ArgumentsLength();
- var args = new $Array(argc + argc_bound);
- // Add bound arguments.
- for (var i = 0; i < argc_bound; i++) {
- args[i] = bound_args[i];
- }
- // Add arguments from call.
- for (var i = 0; i < argc; i++) {
- args[argc_bound + i] = %_Arguments(i);
- }
- // If this is a construct call we use a special runtime method
- // to generate the actual object using the bound function.
- if (%_IsConstructCall()) {
- return %NewObjectFromBound(fn, args);
- }
- return fn.apply(this_arg, args);
- };
// We already have caller and arguments properties on functions,
// which are non-configurable. It therefore makes no sense to
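The non-configurable branch of the rewritten DefineOwnProperty above follows ES5 8.12.9 steps 7a/7b closely. A tiny standalone model of just that guard, illustrative rather than engine code:

struct Desc { bool has_enumerable; bool enumerable; bool configurable; };

// Whether a redefinition may proceed past steps 7a/7b when the existing
// property is non-configurable; the data/accessor checks (steps 9-11)
// are omitted from this model.
bool RedefinitionAllowed(const Desc& current, const Desc& desc) {
  if (current.configurable) return true;  // Steps 8-12 take over instead.
  if (desc.configurable) return false;    // Step 7a.
  if (desc.has_enumerable && desc.enumerable != current.enumerable)
    return false;                         // Step 7b.
  return true;
}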
diff --git a/src/v8utils.h b/src/v8utils.h
index e9623be..0aa53ca 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -316,6 +316,39 @@
}
}
+
+// A resource for using mmapped files to back external strings that are read
+// from files.
+class MemoryMappedExternalResource: public
+ v8::String::ExternalAsciiStringResource {
+ public:
+ explicit MemoryMappedExternalResource(const char* filename);
+ MemoryMappedExternalResource(const char* filename,
+ bool remove_file_on_cleanup);
+ virtual ~MemoryMappedExternalResource();
+
+ virtual const char* data() const { return data_; }
+ virtual size_t length() const { return length_; }
+
+ bool exists() const { return file_ != NULL; }
+ bool is_empty() const { return length_ == 0; }
+
+ bool EnsureIsAscii(bool abort_if_failed) const;
+ bool EnsureIsAscii() const { return EnsureIsAscii(true); }
+ bool IsAscii() const { return EnsureIsAscii(false); }
+
+ private:
+ void Init(const char* filename);
+
+ char* filename_;
+ OS::MemoryMappedFile* file_;
+
+ const char* data_;
+ size_t length_;
+ bool remove_file_on_cleanup_;
+};
+
+
} } // namespace v8::internal
#endif // V8_V8UTILS_H_
diff --git a/src/variables.cc b/src/variables.cc
index 7f580fc..fa7ce1b 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -112,12 +112,12 @@
: scope_(scope),
name_(name),
mode_(mode),
- is_valid_LHS_(is_valid_LHS),
kind_(kind),
local_if_not_shadowed_(NULL),
+ rewrite_(NULL),
+ is_valid_LHS_(is_valid_LHS),
is_accessed_from_inner_scope_(false),
- is_used_(false),
- rewrite_(NULL) {
+ is_used_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
diff --git a/src/variables.h b/src/variables.h
index 882a52e..5d27a02 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -187,21 +187,23 @@
Scope* scope_;
Handle<String> name_;
Mode mode_;
- bool is_valid_LHS_;
Kind kind_;
Variable* local_if_not_shadowed_;
- // Usage info.
- bool is_accessed_from_inner_scope_; // set by variable resolver
- bool is_used_;
-
// Static type information
StaticType type_;
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
Expression* rewrite_;
+
+ // Valid as an LHS? (const and this are not valid LHSs, for example)
+ bool is_valid_LHS_;
+
+ // Usage info.
+ bool is_accessed_from_inner_scope_; // set by variable resolver
+ bool is_used_;
};
diff --git a/src/version.cc b/src/version.cc
index 495de31..2471bc9 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 0
-#define BUILD_NUMBER 9
+#define MINOR_VERSION 1
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 1fe9eed..285c078 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -199,8 +199,10 @@
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(int32_t));
}
}
@@ -236,6 +238,7 @@
Assembler::set_target_address_at(pc_, target);
} else {
Memory::Address_at(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
}
@@ -271,6 +274,7 @@
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
*reinterpret_cast<Object**>(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
@@ -295,6 +299,7 @@
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ CPU::FlushICache(pc_, sizeof(Address));
}
@@ -331,6 +336,8 @@
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
target;
+ CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
+ sizeof(Address));
}
@@ -356,10 +363,14 @@
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -379,10 +390,14 @@
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -414,7 +429,7 @@
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+ buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}
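The parenthesization fix above doesn't change the encoding (shift binds tighter than or); it just makes the SIB layout explicit: scale in bits 7..6, index in bits 5..3, base in bits 2..0. A worked example:

#include <cstdio>

int main() {
  // Encode [rax + rcx*4]: scale = 2 (log2 of 4), index = rcx (code 1),
  // base = rax (code 0).
  unsigned scale = 2, index = 1, base = 0;
  unsigned sib = (scale << 6) | (index << 3) | base;
  printf("0x%02x\n", sib);  // prints 0x88
  return 0;
}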
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index de01cfa..697f6cd 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -300,6 +300,34 @@
}
}
+
+bool Operand::AddressUsesRegister(Register reg) const {
+ int code = reg.code();
+ ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
+ // Start with only low three bits of base register. Initial decoding doesn't
+ // distinguish on the REX.B bit.
+ int base_code = buf_[0] & 0x07;
+ if (base_code == rsp.code()) {
+ // SIB byte present in buf_[1].
+ // Check the index register from the SIB byte + REX.X prefix.
+ int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+ // Index code (including REX.X) of 0x04 (rsp) means no index register.
+ if (index_code != rsp.code() && index_code == code) return true;
+ // Add REX.B to get the full base register code.
+ base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+ // A base register of 0x05 (rbp) with mod = 0 means no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ return code == base_code;
+ } else {
+ // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
+ // no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ base_code |= ((rex_ & 0x01) << 3);
+ return code == base_code;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -888,6 +916,23 @@
}
+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp.
+ emit(0xE8);
+ Address source = pc_ + 4;
+ intptr_t displacement = target - source;
+ ASSERT(is_int32(displacement));
+ emitl(static_cast<int32_t>(displacement));
+}
+
+
void Assembler::clc() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1143,6 +1188,16 @@
}
+void Assembler::imull(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_operand(dst, src);
+}
+
+
void Assembler::imull(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1949,6 +2004,14 @@
}
+void Assembler::push_imm32(int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x68);
+ emitl(imm32);
+}
+
+
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2641,6 +2704,30 @@
}
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);
@@ -2721,6 +2808,17 @@
}
+void Assembler::cvttss2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2732,6 +2830,17 @@
}
+void Assembler::cvttsd2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2930,6 +3039,16 @@
}
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x50);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
@@ -2967,10 +3086,15 @@
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
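For the new Assembler::call(Address) above, the E8 offset is relative to the end of the 5-byte instruction; a standalone rendering of that arithmetic, assuming pc is the address of the opcode byte:

#include <cassert>
#include <cstdint>

// Matches "Address source = pc_ + 4" in the patch, where pc_ has already
// advanced past the one-byte opcode: source is the instruction's end.
int32_t RelativeCallDisplacement(uintptr_t pc, uintptr_t target) {
  uintptr_t source = pc + 5;
  intptr_t displacement = static_cast<intptr_t>(target - source);
  assert(displacement >= INT32_MIN && displacement <= INT32_MAX);
  return static_cast<int32_t>(displacement);
}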
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index be837f0..91e7e6c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -153,6 +153,7 @@
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
int code_;
+
private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters];
@@ -390,11 +391,15 @@
// this must not overflow.
Operand(const Operand& base, int32_t offset);
+ // Checks whether either base or index register is the given register.
+ // Does not check the "reg" part of the Operand.
+ bool AddressUsesRegister(Register reg) const;
+
private:
byte rex_;
byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
+ // The number of bytes of buf_ in use.
+ byte len_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -548,16 +553,29 @@
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
- // The x64 JS return sequence is padded with int3 to make it large
- // enough to hold a call instruction when the debugger patches it.
+ // Some x64 JS code is padded with int3 to make it large
+ // enough to hold an instruction when the debugger patches it.
+ static const int kJumpInstructionLength = 13;
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
+ static const int kShortCallInstructionLength = 5;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
+ // One byte opcode for test al, 0xXX.
+ static const byte kTestAlByte = 0xA8;
+ // One byte opcode for nop.
+ static const byte kNopByte = 0x90;
+
+ // One byte prefix for a short conditional jump.
+ static const byte kJccShortPrefix = 0x70;
+ static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+ static const byte kJcShortOpcode = kJccShortPrefix | carry;
+
+
// ---------------------------------------------------------------------------
// Code generation
@@ -580,7 +598,7 @@
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
+ // of m, where m must be a power of 2.
void Align(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -590,6 +608,9 @@
void popfq();
void push(Immediate value);
+ // Push a 32-bit integer, and guarantee that it is actually pushed as a
+ // 32-bit value; the normal push will optimize the 8-bit case.
+ void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
@@ -707,6 +728,10 @@
arithmetic_op_32(0x1b, dst, src);
}
+ void sbbq(Register dst, Register src) {
+ arithmetic_op(0x1b, dst, src);
+ }
+
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -817,6 +842,10 @@
arithmetic_op_32(0x23, dst, src);
}
+ void andl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x23, dst, src);
+ }
+
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
@@ -845,6 +874,7 @@
void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
// Signed 32-bit multiply instructions.
void imull(Register dst, Register src); // dst = dst * src.
+ void imull(Register dst, const Operand& src); // dst = dst * src.
void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
void incq(Register dst);
@@ -878,6 +908,10 @@
arithmetic_op(0x0B, dst, src);
}
+ void orl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+
void or_(const Operand& dst, Register src) {
arithmetic_op(0x09, src, dst);
}
@@ -1041,6 +1075,18 @@
arithmetic_op_32(0x33, dst, src);
}
+ void xorl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x33, dst, src);
+ }
+
+ void xorl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
+ void xorl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1095,6 +1141,12 @@
void call(Label* L);
void call(Handle<Code> target, RelocInfo::Mode rmode);
+ // Calls directly to the given address using a relative offset.
+ // Should only ever be used in Code objects for calls within the
+ // same Code object. Should not be used when generating new code (use labels),
+ // but only when patching existing code.
+ void call(Address target);
+
// Call near absolute indirect, address in register
void call(Register adr);
@@ -1201,11 +1253,16 @@
void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
+ void movdqa(const Operand& dst, XMMRegister src);
+ void movdqa(XMMRegister dst, const Operand& src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
@@ -1233,16 +1290,14 @@
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void movmskpd(Register dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
- // Use either movsd or movlpd.
- // void movdbl(XMMRegister dst, const Operand& src);
- // void movdbl(const Operand& dst, XMMRegister src);
-
// Debugging
void Print();
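The new short-jump constants compose the one-byte Jcc opcode from 0x70 plus the x86 condition code (carry is 2 and not_carry is 3 in V8's Condition enum), yielding the familiar 0x72/0x73 encodings:

#include <cstdio>

int main() {
  const unsigned kJccShortPrefix = 0x70;
  const unsigned carry = 2;      // x86 condition code for B/C
  const unsigned not_carry = 3;  // x86 condition code for AE/NC
  printf("jc: 0x%02x  jnc: 0x%02x\n",
         kJccShortPrefix | carry, kJccShortPrefix | not_carry);  // 0x72 0x73
  return 0;
}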
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index d738261..08cd21d 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -561,7 +561,33 @@
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ int3();
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Get the full codegen state from the stack and untag it.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Switch on the state.
+ NearLabel not_no_registers, not_tos_rax;
+ __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ j(not_equal, &not_no_registers);
+ __ ret(1 * kPointerSize); // Remove state.
+
+ __ bind(&not_no_registers);
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ j(not_equal, &not_tos_rax);
+ __ ret(2 * kPointerSize); // Remove state, rax.
+
+ __ bind(&not_tos_rax);
+ __ Abort("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
@@ -570,7 +596,7 @@
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 59522d2..4b4531e 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,6 +37,28 @@
namespace internal {
#define __ ACCESS_MASM(masm)
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in eax.
+ NearLabel check_heap_number, call_builtin;
+ __ SmiTest(rax);
+ __ j(not_zero, &check_heap_number);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &call_builtin);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ pop(rcx); // Pop return address.
+ __ push(rax);
+ __ push(rcx); // Push return address.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
@@ -1015,29 +1037,6 @@
}
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
case TRBinaryOpIC::UNINITIALIZED:
@@ -1047,7 +1046,9 @@
GenerateSmiStub(masm);
break;
case TRBinaryOpIC::INT32:
- GenerateInt32Stub(masm);
+ UNREACHABLE();
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
break;
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
@@ -1090,85 +1091,428 @@
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- UNIMPLEMENTED();
+
+ // We only generate heapnumber answers for overflowing calculations
+ // for the four basic arithmetic operations.
+ bool generate_inline_heapnumber_results =
+ (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+ (op_ == Token::ADD || op_ == Token::SUB ||
+ op_ == Token::MUL || op_ == Token::DIV);
+
+ // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
+
+
+ // Smi check of both operands. If op is BIT_OR, the check is delayed
+ // until after the OR operation.
+ Label not_smis;
+ Label use_fp_on_smis;
+ Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+
+ if (op_ != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+
+ // Perform the operation.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ switch (op_) {
+ case Token::ADD:
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+
+ case Token::SUB:
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
+ case Token::DIV:
+ // SmiDiv will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiDiv(rax, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::MOD:
+ // SmiMod will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiMod(rax, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::BIT_OR: {
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
+ break;
+ }
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, &not_smis);
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // 5. Emit return of result in rax. Some operations have registers pushed.
+ __ ret(0);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
+ }
+
+
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ }
+
+ // 7. Non-smi operands reach the end of the code generated by
+ // GenerateSmiCode, and fall through to subsequent code,
+ // with the operands in rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ if (op_ == Token::BIT_OR) {
+ __ movq(right, rcx);
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
+ MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, allocation_failure);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we jump to the allocation_failure label, to call runtime.
+ __ jmp(allocation_failure);
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+ heap_number_map);
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ __ Ret();
+
+ // Logical shift right can produce an unsigned int32 that is not
+ // an int32, and so is not in the smi range. Allocate a heap number
+ // in that case.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_shr_result);
+ Label allocation_failed;
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &allocation_failed,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ Ret();
+
+ __ bind(&allocation_failed);
+ // We need tagged values in rdx and rax for the following code,
+ // not int32 in rax and rcx.
+ __ Integer32ToSmi(rax, rcx);
+ __ Integer32ToSmi(rdx, rax);
+ __ jmp(allocation_failure);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ // No fall-through from this generated code.
+ if (FLAG_debug_code) {
+ __ Abort("Unexpected fall-through in "
+ "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ // Registers containing left and right operands respectively.
+ Register lhs = rdx;
+ Register rhs = rax;
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ __ JumpIfNotString(lhs, r8, &not_string1);
+
+ // First argument is a string, test second.
+ __ JumpIfSmi(rhs, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ JumpIfNotString(rhs, rhs, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ // Pop arguments, because CallRuntimeCode wants to push them again.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
+ Label not_smi;
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
+ GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
- } else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- }
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
+ __ bind(&not_smi);
+ GenerateTypeTransition(masm);
}
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
+ ASSERT(op_ == Token::ADD);
+ GenerateStringAddCode(masm);
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ GenerateTypeTransition(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label gc_required, not_number;
+ GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ __ bind(&not_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&gc_required);
+ GenerateCallRuntimeCode(masm);
}
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateStringAddCode(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntimeCode(masm);
}
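/* Sketch, not part of this patch: how the stubs above cooperate. Each
   specialized TypeRecordingBinaryOpStub handles one operand class and, on
   seeing a type it cannot handle, calls GenerateTypeTransition to install
   a more general stub, roughly:

     UNINITIALIZED -> SMI -> HEAP_NUMBER -> GENERIC     (numeric operations)
     UNINITIALIZED -> STRING -> GENERIC                 (ADD with a string)

   GenerateGeneric is the fixed point: it tries smis, then doubles, then
   (for ADD) strings, and finally falls back to the runtime.
*/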
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
- UNIMPLEMENTED();
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in rdx is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rdx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ // Use object in rdx as a result holder
+ __ movq(rax, rdx);
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
}
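/* Sketch, not part of this patch: the allocation policy above in plain
   C++ (types and helpers hypothetical). The left operand lives in rdx,
   the right one in rax, and the result heap number ends up in rax.

   HeapNumber* ResultHolder(OverwriteMode mode, Value left, Value right) {
     switch (mode) {
       case OVERWRITE_LEFT:
         return !IsSmi(left) ? AsHeapNumber(left) : AllocateHeapNumber();
       case OVERWRITE_RIGHT:
         return !IsSmi(right) ? AsHeapNumber(right) : AllocateHeapNumber();
       case NO_OVERWRITE:
       default:
         return AllocateHeapNumber();  // both inputs survive a runtime call
     }
   }
*/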
@@ -1490,6 +1834,7 @@
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
Register heap_number_map) {
@@ -1499,28 +1844,27 @@
Label load_arg2, done;
__ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(r8, rdx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
+ __ movl(r8, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in rcx.
- IntegerConvert(masm, rdx, rdx);
+ // Get the untagged integer version of the rdx heap number in r8.
+ IntegerConvert(masm, r8, rdx);
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ // Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
+ __ SmiToInteger32(rcx, rax);
__ jmp(&done);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
@@ -1536,7 +1880,7 @@
// Get the untagged integer version of the rax heap number in rcx.
IntegerConvert(masm, rcx, rax);
__ bind(&done);
- __ movl(rax, rdx);
+ __ movl(rax, r8);
}
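/* Sketch, not part of this patch: the coercion LoadAsIntegers performs
   for bit ops, per ECMA-262 section 9.5 (ToInt32). Names hypothetical.

   bool ToInt32ForBitOp(Value v, int32_t* out) {
     if (IsSmi(v))        { *out = SmiValue(v);                return true; }
     if (IsUndefined(v))  { *out = 0;                          return true; }
     if (IsHeapNumber(v)) { *out = TruncateToInt32(Number(v)); return true; }
     return false;  // conversion_failure; tagged inputs left unchanged
   }

   The change routes the left operand's untagged value through r8 instead
   of clobbering rdx, which is what makes the "rdx and rax are unchanged"
   guarantee on conversion_failure possible.
*/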
@@ -1866,11 +2210,11 @@
}
// Stack frame on entry.
- // esp[0]: return address
- // esp[8]: last_match_info (expected JSArray)
- // esp[16]: previous index
- // esp[24]: subject string
- // esp[32]: JSRegExp object
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -2212,7 +2556,7 @@
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
+ // Allocate RegExpResult followed by FixedArray with size in rbx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
@@ -2271,7 +2615,7 @@
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
+ __ j(less_equal, &done); // Jump if rbx is negative or zero.
__ subl(rbx, Immediate(1));
__ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
@@ -2634,7 +2978,7 @@
// undefined, and are equal.
__ Set(rax, EQUAL);
__ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
+ // Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(0);
__ bind(&not_both_objects);
@@ -2774,8 +3118,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
+ bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
@@ -2868,7 +3211,7 @@
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame();
+ __ LeaveExitFrame(save_doubles_);
__ ret(0);
// Handling of failure.
@@ -2977,7 +3320,7 @@
#else
int arg_stack_space = 0;
#endif
- __ EnterExitFrame(arg_stack_space);
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -3130,7 +3473,7 @@
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
+ // If current RBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ movq(kScratchRegister, js_entry_sp);
__ cmpq(rbp, Operand(kScratchRegister, 0));
@@ -3248,6 +3591,12 @@
}
+Register InstanceofStub::left() { return rax; }
+
+
+Register InstanceofStub::right() { return rdx; }
+
+
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
@@ -4272,24 +4621,168 @@
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::SMIS);
+ NearLabel miss;
+ __ JumpIfNotBothSmi(rdx, rax, &miss);
+
+ if (GetCondition() == equal) {
+ // For equality we do not care about the sign of the result.
+ __ subq(rax, rdx);
+ } else {
+ NearLabel done;
+ __ subq(rdx, rax);
+ __ j(no_overflow, &done);
+ // Correct sign of result in case of overflow.
+ __ SmiNot(rdx, rdx);
+ __ bind(&done);
+ __ movq(rax, rdx);
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
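/* Sketch, not part of this patch: the overflow fix-up above, shown on
   32-bit integers. When left - right overflows, the computed sign is the
   opposite of the true one; flipping all bits (SmiNot) restores a value
   with the correct sign, and callers only look at the sign:

   int32_t CompareForOrdering(int32_t left, int32_t right) {
     int64_t wide = static_cast<int64_t>(left) - right;
     int32_t narrow = static_cast<int32_t>(wide);
     if (wide != narrow) narrow = ~narrow;  // overflow: correct the sign
     return narrow;  // < 0, == 0, > 0, strcmp-style
   }
*/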
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ NearLabel generic_stub;
+ NearLabel unordered;
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rax, rdx);
+ __ j(either_smi, &generic_stub);
+
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+
+ // Load the left and right operands.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Compare operands
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ // Performing mov, because xor would destroy the flag register.
+ __ movl(rax, Immediate(0));
+ __ movl(rcx, Immediate(0));
+ __ setcc(above, rax); // Add one to zero if carry clear and not equal.
+ __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ ret(0);
+
+ __ bind(&unordered);
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+ __ bind(&generic_stub);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
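/* Sketch, not part of this patch: the flag-to-integer trick above. After
   ucomisd, ZF is set on equal and CF on below (NaN sets both plus PF and
   is diverted by the parity_even branch). With rax = rcx = 0:

     setcc(above, rax)  =>  rax = (left > right) ? 1 : 0
     sbbq(rax, rcx)     =>  rax -= CF, i.e. subtract 1 when left < right

   which is the assembly analogue of:

   int CompareDoubles(double left, double right) {  // no NaNs reach here
     return (left > right) - (left < right);        // 1, 0 or -1
   }
*/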
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::OBJECTS);
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss);
+
+ __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+
+ ASSERT(GetCondition() == equal);
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ // Save the registers.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rcx);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ __ EnterInternalFrame();
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+
+ // Restore registers.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+
+ // Do a tail call to the rewritten stub.
+ __ jmp(rdi);
}
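/* Sketch, not part of this patch: the miss protocol above. The operands
   in rdx/rax are preserved on the stack across the call; kCompareIC_Miss
   returns the code object of a freshly specialized compare stub, and the
   closing jmp(rdi) tail-calls its first instruction, so the next compare
   runs the rewritten stub instead of this one.
*/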
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // elements - is set to the receiver's elements if
+ // the receiver doesn't have a pixel array or the
+ // key is not a smi, otherwise it's the elements'
+ // external pointer.
+ // untagged_key - is set to the untagged key.
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check is
+ // generated to ensure that the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+ // Check that the smi is in range.
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Load and tag the element as a smi.
+ __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
+ __ Integer32ToSmi(result, result);
+ __ ret(0);
+}
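/* Sketch, not part of this patch: the fast path above in plain C++
   (accessor names hypothetical). A pixel element is one byte, so the
   result always fits in a smi:

   bool FastPixelLoad(PixelArray* array, int32_t untagged_key, int* out) {
     // One unsigned compare rejects both negative and too-large keys.
     if (static_cast<uint32_t>(untagged_key) >=
         static_cast<uint32_t>(array->length())) {
       return false;  // out_of_range
     }
     *out = array->external_pointer()[untagged_key];  // 0..255
     return true;
   }
*/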
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 9feced2..8051d4b 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -270,6 +270,11 @@
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure);
+ void GenerateStringAddCode(MacroAssembler* masm);
+ void GenerateCallRuntimeCode(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
@@ -447,6 +452,25 @@
};
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// the generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key,
+// in which case no smi check is generated. If key is not a valid index within
+// the bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a543a50..fe90567 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2993,21 +2993,22 @@
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, rcx);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint.
- // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+ // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
// with length 7 (3 + 1 + 3).
const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -4893,7 +4894,8 @@
Load(property->value());
if (property->emit_store()) {
Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false);
+ frame_->CallStoreIC(Handle<String>::cast(key), false,
+ strict_mode_flag());
// A test rax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
@@ -5402,9 +5404,12 @@
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -5421,8 +5426,11 @@
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
@@ -6969,10 +6977,12 @@
__ j(not_equal, &not_minus_half);
// Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x))
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -6985,7 +6995,9 @@
call_runtime.Branch(not_equal);
// Calculates square root.
- __ movsd(xmm1, xmm0);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
JumpTarget done;
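/* Sketch, not part of this patch: why xorpd/addsd replaces the plain move
   before sqrtsd. IEEE-754 defines sqrt(-0) = -0, while ECMA-262 requires
   Math.pow(-0, 0.5) == +0 (and +Infinity for the -0.5 case). Adding +0
   first canonicalizes the sign, because -0 + +0 == +0:

   double EcmaSqrt(double x) { return std::sqrt(0.0 + x); }
   // EcmaSqrt(-0.0) == +0.0, whereas std::sqrt(-0.0) == -0.0.
*/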
@@ -7235,19 +7247,13 @@
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
+ // Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(rsi);
frame_->EmitPush(variable->name());
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}
@@ -8229,7 +8235,7 @@
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// A test rax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test rax
// instruction here.
@@ -8329,7 +8335,7 @@
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test rax
// instruction.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index b308f64..c283db3 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -357,6 +357,7 @@
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 30134bf..513c522 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -43,6 +43,9 @@
void CPU::Setup() {
CpuFeatures::Probe(true);
+ if (Serializer::enabled()) {
+ V8::DisableCrankshaft();
+ }
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 6b19d3f..ed6c47b 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,19 +40,176 @@
int Deoptimizer::table_entry_size_ = 10;
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- // UNIMPLEMENTED, for now just return.
- return;
+
+int Deoptimizer::patch_size() {
+ return MacroAssembler::kCallInstructionLength;
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
- Code* replacement_code) {
+#ifdef DEBUG
+// Overwrites code with int3 instructions.
+static void ZapCodeRange(Address from, Address to) {
+ CHECK(from <= to);
+ int length = static_cast<int>(to - from);
+ CodePatcher destroyer(from, length);
+ while (length-- > 0) {
+ destroyer.masm()->int3();
+ }
+}
+#endif
+
+
+// Iterate through the entries of a SafepointTable that correspond to
+// deoptimization points.
+class SafepointTableDeoptimiztionEntryIterator {
+ public:
+ explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
+ : code_(code), table_(code), index_(-1), limit_(table_.length()) {
+ FindNextIndex();
+ }
+
+ SafepointEntry Next(Address* pc) {
+ if (index_ >= limit_) {
+ *pc = NULL;
+ return SafepointEntry(); // Invalid entry.
+ }
+ *pc = code_->instruction_start() + table_.GetPcOffset(index_);
+ SafepointEntry entry = table_.GetEntry(index_);
+ FindNextIndex();
+ return entry;
+ }
+
+ private:
+ void FindNextIndex() {
+ ASSERT(index_ < limit_);
+ while (++index_ < limit_) {
+ if (table_.GetEntry(index_).deoptimization_index() !=
+ Safepoint::kNoDeoptimizationIndex) {
+ return;
+ }
+ }
+ }
+
+ Code* code_;
+ SafepointTable table_;
+ // Index of the next deoptimization entry. If it has reached limit_ after
+ // calling FindNextIndex, there are no more, and Next will return an
+ // invalid SafepointEntry.
+ int index_;
+ // Table length.
+ int limit_;
+};
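/* Sketch, not part of this patch: intended use of the iterator above.

   SafepointTableDeoptimiztionEntryIterator it(code);
   Address pc;
   for (SafepointEntry entry = it.Next(&pc);
        entry.is_valid();
        entry = it.Next(&pc)) {
     // pc is the return site of the safepoint inside the code object;
     // entry supplies gap_code_size() and deoptimization_index().
   }
*/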
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ AssertNoAllocation no_allocation;
+
+ if (!function->IsOptimized()) return;
+
+ // Get the optimized code.
+ Code* code = function->code();
+
+ // Invalidate the relocation information, as it will become invalid due to
+ // the code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ // For each return after a safepoint, insert an absolute call to the
+ // corresponding deoptimization entry, or a short call to an absolute
+ // jump if space is short. The absolute jumps are put in a table just
+ // before the safepoint table (space was allocated there when the Code
+ // object was created, if necessary).
+
+ Address instruction_start = function->code()->instruction_start();
+ Address jump_table_address =
+ instruction_start + function->code()->safepoint_table_offset();
+ Address previous_pc = instruction_start;
+
+ SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
+ Address entry_pc = NULL;
+
+ SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
+ while (current_entry.is_valid()) {
+ int gap_code_size = current_entry.gap_code_size();
+ unsigned deoptimization_index = current_entry.deoptimization_index();
+
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ ZapCodeRange(previous_pc, entry_pc);
+#endif
+ // Position where Call will be patched in.
+ Address call_address = entry_pc + gap_code_size;
+ // End of call instruction, if using a direct call to a 64-bit address.
+ Address call_end_address =
+ call_address + MacroAssembler::kCallInstructionLength;
+
+ // Find next deoptimization entry, if any.
+ Address next_pc = NULL;
+ SafepointEntry next_entry = deoptimizations.Next(&next_pc);
+
+ if (!next_entry.is_valid() || next_pc >= call_end_address) {
+ // Enough room to write a long call instruction.
+ CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+ patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+ previous_pc = call_end_address;
+ } else {
+ // Not enough room for a long Call instruction. Write a short call
+ // instruction to a long jump placed elsewhere in the code.
+ Address short_call_end_address =
+ call_address + MacroAssembler::kShortCallInstructionLength;
+ ASSERT(next_pc >= short_call_end_address);
+
+ // Write jump in jump-table.
+ jump_table_address -= MacroAssembler::kJumpInstructionLength;
+ CodePatcher jump_patcher(jump_table_address,
+ MacroAssembler::kJumpInstructionLength);
+ jump_patcher.masm()->Jump(
+ GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+
+ // Write a short call to the jump at call_address.
+ CodePatcher call_patcher(call_address,
+ MacroAssembler::kShortCallInstructionLength);
+ call_patcher.masm()->call(jump_table_address);
+ previous_pc = short_call_end_address;
+ }
+
+ // Continue with next deoptimization entry.
+ current_entry = next_entry;
+ entry_pc = next_pc;
+ }
+
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ ZapCodeRange(previous_pc, jump_table_address);
+#endif
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ node->set_next(deoptimizing_code_list_);
+ deoptimizing_code_list_ = node;
+
+ // Set the code for the function to non-optimized version.
+ function->ReplaceCode(function->shared()->code());
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+}
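/* Sketch, not part of this patch: the patching decision above, with the
   patch helpers named hypothetically. A direct call to a 64-bit entry
   needs kCallInstructionLength bytes; when the next safepoint return is
   closer than that, the call bounces through a jump-table slot carved out
   just before the safepoint table:

   if (next_pc == NULL || next_pc >= call_address + kCallInstructionLength) {
     PatchLongCall(call_address, deopt_entry);            // fits in place
   } else {
     jump_table_address -= kJumpInstructionLength;        // grows downwards
     PatchJump(jump_table_address, deopt_entry);
     PatchShortCall(call_address, jump_table_address);    // short call
   }
*/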
+
+
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
@@ -64,20 +221,382 @@
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
- UNIMPLEMENTED();
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ int node_id = iterator->Next();
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ intptr_t top_address;
+ if (is_bottommost) {
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be obtained from the function so long as we don't
+ // optimize functions that need local contexts.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function->context());
+ // The context for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) output_frame->SetRegister(rsi.code(), value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost) {
+ Code* continuation = (bailout_type_ == EAGER)
+ ? Builtins::builtin(Builtins::NotifyDeoptimized)
+ : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+
+ if (output_count_ - 1 == frame_index) iterator->Done();
}
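/* Sketch, not part of this patch: the output frame laid out above, from
   the top address downwards, one kPointerSize slot each unless noted:

     [ incoming parameters, receiver included ]    parameter_count slots
     [ caller's pc ]
     [ caller's fp ]    <- this frame's fp
     [ context ]
     [ function ]
     [ locals / expression stack ]                 height slots

   fixed_frame_size covers everything except the height_in_bytes portion.
*/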
+#define __ masm()->
+
void Deoptimizer::EntryGenerator::Generate() {
- // UNIMPLEMENTED, for now just return.
- return;
+ GeneratePrologue();
+ CpuFeatures::Scope scope(SSE2);
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize = kDoubleSize *
+ XMMRegister::kNumAllocatableRegisters;
+ __ subq(rsp, Immediate(kDoubleRegsSize));
+
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movsd(Operand(rsp, offset), xmm_reg);
+ }
+
+ // We push all registers onto the stack, even though we do not need
+ // to restore all later.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ Register r = Register::toRegister(i);
+ __ push(r);
+ }
+
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ kDoubleRegsSize;
+
+ // When calling new_deoptimizer_function we need to pass the last argument
+ // on the stack on Windows and in r8 on Linux. The remaining arguments are
+ // all passed in registers (different ones on Linux and Windows, though).
+
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+ // We use this to keep the value of the fifth argument temporarily.
+ // Unfortunately we can't store it directly in r8 (used for passing
+ // it on Linux), since it is another parameter-passing register on Windows.
+ Register arg5 = r11;
+
+ // Get the bailout id from the stack.
+ __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object if possible
+ // and compute the fp-to-sp delta in register arg5.
+ if (type() == EAGER) {
+ __ Set(arg4, 0);
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ } else {
+ __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ }
+
+ __ subq(arg5, rbp);
+ __ neg(arg5);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(5);
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(arg1, rax);
+ __ movq(arg2, Immediate(type()));
+ // Args 3 and 4 are already in the right registers.
+
+ // On Windows put the argument on the stack (PrepareCallCFunction has
+ // created space for this). On Linux pass the argument in r8.
+#ifdef _WIN64
+ __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+#else
+ __ movq(r8, arg5);
+#endif
+
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ // Preserve deoptimizer object in register rax and get the input
+ // frame descriptor pointer.
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(rbx, offset));
+ }
+
+ // Fill in the double input registers.
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ __ pop(Operand(rbx, dst_offset));
+ }
+
+ // Remove the bailout id from the stack.
+ if (type() == EAGER) {
+ __ addq(rsp, Immediate(kPointerSize));
+ } else {
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ }
+
+ // Compute a pointer to the unwinding limit in register rcx; that is
+ // the first stack slot not part of the input frame.
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(rdx, 0));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ cmpq(rcx, rsp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(rax);
+ __ PrepareCallCFunction(1);
+ __ movq(arg1, rax);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ pop(rax);
+
+ // Replace the current frame with the output frames.
+ Label outer_push_loop, inner_push_loop;
+ // Outer loop state: rax = current FrameDescription**, rdx = one past the
+ // last FrameDescription**.
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ bind(&outer_push_loop);
+ // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ bind(&inner_push_loop);
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
+ __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ testq(rcx, rcx);
+ __ j(not_zero, &inner_push_loop);
+ __ addq(rax, Immediate(kPointerSize));
+ __ cmpq(rax, rdx);
+ __ j(below, &outer_push_loop);
+
+ // In case of OSR, we have to restore the XMM registers.
+ if (type() == OSR) {
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(rbx, src_offset));
+ }
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ if (type() != OSR) {
+ __ push(Operand(rbx, FrameDescription::state_offset()));
+ }
+ __ push(Operand(rbx, FrameDescription::pc_offset()));
+ __ push(Operand(rbx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(rbx, offset));
+ }
+
+ // Restore the registers from the stack.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ Register r = Register::toRegister(i);
+ // Do not restore rsp, simply pop the value into the next register
+ // and overwrite this afterwards.
+ if (r.is(rsp)) {
+ ASSERT(i > 0);
+ r = Register::toRegister(i - 1);
+ }
+ __ pop(r);
+ }
+
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(r13, roots_address);
+
+ __ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+
+ // Return to the continuation point.
+ __ ret(0);
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
+ // Create a sequence of deoptimization entries.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ push_imm32(i);
+ __ jmp(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
}
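/* Sketch, not part of this patch: each table entry above encodes as
   "push imm32" (5 bytes) followed by "jmp rel32" (5 bytes), matching the
   table_entry_size_ of 10 declared near the top of this file; the pushed
   index tells the shared deoptimization code which entry was hit.
*/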
+#undef __
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7502d61..f73f948 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1025,11 +1025,19 @@
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x6F) {
+ AppendToBuffer("movdqa %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x7F) {
+ AppendToBuffer("movdqa ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
if (opcode == 0x57) {
@@ -1038,6 +1046,8 @@
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x50) {
+ mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
@@ -1113,9 +1123,11 @@
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
- // Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT_NE(0xC0, *current & 0xC0);
- current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si%c %s,",
+ operand_size_code(), NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x5A) {
// CVTSS2SD:
// Convert scalar single-precision FP to scalar double-precision FP.
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a2a0e7e..998b3e9 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,7 +31,7 @@
namespace v8 {
namespace internal {
-static const int kNumRegs = 8;
+static const int kNumRegs = 16;
static const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
@@ -44,8 +44,7 @@
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 724a7c5..556ec85 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -43,6 +43,58 @@
#define __ ACCESS_MASM(masm_)
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm)
+ : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ testl(rax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ // jc will be patched with jz, jnc will become jnz.
+ void EmitJump(Condition cc, NearLabel* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
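/* Sketch, not part of this patch: the patch protocol above. "test reg, 1"
   (kSmiTagMask) always clears CF and sets ZF exactly when reg is a smi,
   so the emitted jc/jnc has a fixed outcome until the inline cache
   rewrites its condition code to jz/jnz. The "testl rax, imm" emitted by
   EmitPatchInfo is never consulted for its flags: its immediate records
   the distance back to the patch site so the IC machinery can locate the
   jump it needs to rewrite.
*/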
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -245,19 +297,22 @@
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
+
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, rcx);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+ // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
// (3 + 1 + 3).
const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -659,18 +714,24 @@
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- VisitForStackValue(prop->obj());
- if (function != NULL) {
- VisitForStackValue(prop->key());
- VisitForAccumulatorValue(function);
- __ pop(rcx);
- } else {
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, result_register());
- __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ // property. Use (keyed) IC to set the initial value. We
+ // cannot visit the rewrite because it's shared and we risk
+ // recording duplicate AST IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- __ pop(rdx);
+ if (function != NULL) {
+ __ push(rax);
+ VisitForAccumulatorValue(function);
+ __ pop(rdx);
+ } else {
+ __ movq(rdx, rax);
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ }
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -710,6 +771,8 @@
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
+ clause->body_target()->entry_label()->Unuse();
+
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@@ -726,21 +789,25 @@
// Perform the comparison as if via '==='.
__ movq(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rdx, rax, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+
+ __ cmpq(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target()->entry_label());
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(equal, true, flags);
- __ CallStub(&stub);
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1520,21 +1587,17 @@
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
- Label done, stub_call, smi_case;
+ NearLabel done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
- Condition smi = masm()->CheckBothSmi(rdx, rax);
- __ j(smi, &smi_case);
+ __ or_(rax, rdx);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(rax, &smi_case);
__ bind(&stub_call);
- GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
- if (stub.ArgsInRegistersSupported()) {
- stub.GenerateCall(masm_, rdx, rcx);
- } else {
- __ push(rdx);
- __ push(rcx);
- __ CallStub(&stub);
- }
+ __ movq(rax, rcx);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
__ bind(&smi_case);
@@ -1578,14 +1641,9 @@
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
- GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
- if (stub.ArgsInRegistersSupported()) {
- __ pop(rdx);
- stub.GenerateCall(masm_, rdx, rax);
- } else {
- __ push(result_register());
- __ CallStub(&stub);
- }
+ __ pop(rdx);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
context()->Plug(rax);
}
@@ -1657,8 +1715,10 @@
// rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1932,7 +1992,9 @@
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2006,16 +2068,21 @@
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
if (prop->is_synthetic()) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForAccumulatorValue(prop->key());
- }
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, rdx);
+ __ movq(rdx, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ Move(rax, prop->key()->AsLiteral()->handle());
+
// Record source code position for IC call.
SetSourcePosition(prop->position());
- __ pop(rdx); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -2026,6 +2093,9 @@
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
} else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
}
@@ -2992,26 +3062,29 @@
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
- } else {
- // Property or variable reference. Call the delete builtin with
- // object and property name as arguments.
- if (prop != NULL) {
+ } else if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ Push(var->name());
- } else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(rax);
- __ Push(var->name());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(rax);
}
+ } else if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ Push(var->name());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(rax);
}
break;
}
@@ -3054,8 +3127,8 @@
Label no_conversion;
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, &no_conversion);
- __ push(result_register());
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
context()->Plug(result_register());
break;
@@ -3171,8 +3244,8 @@
Condition is_smi;
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &no_conversion);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
// Save result for postfix expressions.
@@ -3196,7 +3269,9 @@
}
// Inline smi case if we are in a loop.
- Label stub_call, done;
+ NearLabel stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
@@ -3206,8 +3281,8 @@
__ j(overflow, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &done);
+ patch_site.EmitJumpIfSmi(rax, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
@@ -3221,10 +3296,14 @@
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- GenericBinaryOpStub stub(expr->binary_op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- stub.GenerateCall(masm_, rax, Smi::FromInt(1));
+ TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ if (expr->op() == Token::INC) {
+ __ Move(rdx, Smi::FromInt(1));
+ } else {
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
+ }
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in rax.
@@ -3494,19 +3573,21 @@
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ __ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cc, strict, flags);
- __ CallStub(&stub);
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -3569,10 +3650,30 @@
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1);
+ default:
+ break;
+ }
+
__ call(ic, mode);
// Crankshaft doesn't need patching of inlined loads and stores.
- if (V8::UseCrankshaft()) return;
+ // When compiling the snapshot we need to produce code that works
+ // with and without Crankshaft.
+ if (V8::UseCrankshaft() && !Serializer::enabled()) {
+ return;
+ }
// If we're calling a (keyed) load or store stub, we have to mark
// the call as containing no inlined code so we will not attempt to
@@ -3591,6 +3692,16 @@
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index b54aeb9..8c2856f 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -397,7 +397,7 @@
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -405,7 +405,8 @@
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
+ support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -579,20 +580,15 @@
__ ret(0);
__ bind(&check_pixel_array);
- // Check whether the elements object is a pixel array.
- // rdx: receiver
- // rax: key
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kPixelArrayMapRootIndex);
- __ j(not_equal, &check_number_dictionary);
- __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
+ GenerateFastPixelArrayLoad(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rax,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
@@ -727,131 +723,6 @@
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the object is a JS object.
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: index (as a smi)
- // rdx: JSObject
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalIntArray:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- NearLabel box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
@@ -1023,149 +894,6 @@
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the object is a JS object.
- __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- NearLabel check_heap_number;
- __ JumpIfNotSmi(rax, &check_heap_number);
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Need to perform float-to-int conversion.
- // Test the value for NaN.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvtsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvtsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvtsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm);
-}
-
-
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
@@ -1745,7 +1473,8 @@
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1756,7 +1485,8 @@
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
- MONOMORPHIC);
+ MONOMORPHIC,
+ extra_ic_state);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
@@ -1945,11 +1675,23 @@
}
+static bool HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al instruction,
+ // nothing was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
+
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
@@ -1967,10 +1709,43 @@
Token::Name(op_));
}
#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
}
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al instruction,
+ // nothing was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ ASSERT(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta gives the offset back to the start of the inlined map
+ // check; the jump found there determines the condition code used in
+ // the patch.
+ int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ address, test_instruction_address, delta);
+ }
+
+ // Patch with a short conditional jump. There must be a
+ // short jump-if-carry/not-carry at this position.
+ Address jmp_address = test_instruction_address - delta;
+ ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode);
+ Condition cc = *jmp_address == Assembler::kJncShortOpcode
+ ? not_zero
+ : zero;
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
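An editor's aside on the convention above: the byte right after the call site is either a nop (nothing inlined) or a test al whose immediate operand stores the distance back to the patchable short jump. A standalone C++ sketch of the same byte-patching idea, using the documented opcodes but an invented buffer layout:

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {
const uint8_t kTestAlByte = 0xA8;      // test al, imm8
const uint8_t kNopByte = 0x90;         // nop: signals no inlined smi code
const uint8_t kJccShortPrefix = 0x70;  // 0x70 | cc = short conditional jump
const uint8_t kJncShortOpcode = 0x73;  // jnc rel8
const uint8_t kJcShortOpcode = 0x72;   // jc rel8

// Mirror of PatchInlinedSmiCode's logic, on a plain byte buffer.
void PatchSmiCheck(uint8_t* after_call) {
  if (after_call[0] != kTestAlByte) {
    assert(after_call[0] == kNopByte);  // nothing was inlined
    return;
  }
  int8_t delta = static_cast<int8_t>(after_call[1]);
  uint8_t* jmp = after_call - delta;
  assert(*jmp == kJncShortOpcode || *jmp == kJcShortOpcode);
  uint8_t cc = (*jmp == kJncShortOpcode) ? 0x5 /* not_zero */
                                         : 0x4 /* zero */;
  *jmp = static_cast<uint8_t>(kJccShortPrefix | cc);
}
}  // namespace

int main() {
  // Invented layout: a jnc at offset 0, the test al marker at offset 4.
  uint8_t code[6] = {kJncShortOpcode, 0x02, 0x90, 0x90, kTestAlByte, 4};
  PatchSmiCheck(&code[4]);
  printf("patched opcode: 0x%02x\n", code[0]);  // 0x75 = jnz rel8
}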
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 151fad7..36c9aac 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -37,157 +37,6 @@
namespace internal {
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -204,7 +53,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -339,6 +188,20 @@
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ // Ensure that there is space at the end of the code to write a number
+ // of jump instructions, as well as room for writing a call near the
+ // end of the code.
+ // The jumps are used when there isn't room in the code stream to write
+ // a long call instruction; a shorter call to a jump instruction in the
+ // same code object is written instead.
+ // The calls are emitted when lazily deoptimizing a function; they call
+ // into a deoptimization entry.
+ int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
+ static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
+ int byte_count = short_deopts * MacroAssembler::kJumpInstructionLength;
+ while (byte_count-- > 0) {
+ __ int3();
+ }
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
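For intuition on what CountShortDeoptimizationIntervals is sizing here, a hedged model: the assumption is that it counts safepoint pairs packed more tightly than one jump instruction, each of which needs an int3-padded trampoline slot.

#include <cstdio>
#include <vector>

// Model: safepoints closer together than a jump instruction cannot each
// host a long deoptimization call, so a trampoline slot is reserved.
int CountShortIntervals(const std::vector<int>& safepoint_offsets,
                        int jump_length) {
  int count = 0;
  for (size_t i = 1; i < safepoint_offsets.size(); ++i) {
    if (safepoint_offsets[i] - safepoint_offsets[i - 1] < jump_length) {
      ++count;
    }
  }
  return count;
}

int main() {
  std::vector<int> offsets = {0, 5, 40, 48};  // hypothetical code offsets
  int jump_length = 13;  // assumed jump instruction length on x64
  printf("%d padding bytes\n",
         CountShortIntervals(offsets, jump_length) * jump_length);  // 26
}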
@@ -493,17 +356,11 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
- if (instr != NULL) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RegisterLazyDeoptimization(instr);
- } else {
- LPointerMap no_pointers(0);
- RecordPosition(no_pointers.position());
- __ call(code, mode);
- RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
- }
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ call(code, mode);
+ RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -517,7 +374,13 @@
void LCodeGen::CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallRuntime");
+ ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ __ CallRuntime(function, num_arguments);
+ RegisterLazyDeoptimization(instr);
}
@@ -567,7 +430,24 @@
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- Abort("Unimplemented: %s", "Deoptimiz");
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(entry != NULL);
+ if (entry == NULL) {
+ Abort("bailout was not prepared");
+ return;
+ }
+
+ if (cc == no_condition) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ NearLabel done;
+ __ j(NegateCondition(cc), &done);
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&done);
+ }
}
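The conditional case above exists because the long RUNTIME_ENTRY jump has no conditional encoding, so DeoptimizeIf jumps around it on the negated condition. The equivalent control flow in plain C++ (a sketch of the shape, not the emitted bytes):

#include <cstdio>

// Synthesizing a conditional long jump from a short negated branch over
// an unconditional one, as in the cc != no_condition path above.
void MaybeDeoptimize(bool condition_holds) {
  if (!condition_holds) {             // __ j(NegateCondition(cc), &done)
    printf("fall through\n");         // __ bind(&done)
    return;
  }
  printf("jump to deopt entry\n");    // __ Jump(entry, RUNTIME_ENTRY)
}

int main() {
  MaybeDeoptimize(false);
  MaybeDeoptimize(true);
}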
@@ -629,37 +509,41 @@
}
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
+
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- deoptimization_index);
+ kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
+ if (kind & Safepoint::kWithRegisters) {
+ // Register rsi always contains a pointer to the context.
+ safepoint.DefinePointerRegister(rsi);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegisters(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi);
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
}
@@ -682,86 +566,7 @@
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // xmm0 must always be a scratch register.
- XMMRegister xmm_scratch = xmm0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register cpu_scratch = kScratchRegister;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(xmm_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
- if (from->IsConstantOperand()) {
- LConstantOperand* constant_from = LConstantOperand::cast(from);
- if (to->IsRegister()) {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToRegister(to), ToHandle(constant_from));
- }
- } else {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToOperand(to), ToHandle(constant_from));
- }
- }
- } else if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), cpu_scratch);
- } else if (to->IsStackSlot()) {
- __ movq(ToOperand(to), cpu_scratch);
- } else if (to->IsDoubleRegister()) {
- __ movsd(ToDoubleRegister(to), xmm_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), xmm_scratch);
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister()) {
- __ movq(cpu_scratch, ToRegister(from));
- } else if (from->IsStackSlot()) {
- __ movq(cpu_scratch, ToOperand(from));
- } else if (from->IsDoubleRegister()) {
- __ movsd(xmm_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), ToRegister(from));
- } else {
- __ movq(ToOperand(to), ToRegister(from));
- }
- } else if (to->IsRegister()) {
- __ movq(ToRegister(to), ToOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ push(rax);
- __ movq(rax, ToOperand(from));
- __ movq(ToOperand(to), rax);
- __ pop(rax);
- } else if (from->IsDoubleRegister()) {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), ToDoubleRegister(from));
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(ToDoubleRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- __ movsd(ToOperand(to), xmm_scratch);
- }
- }
+ resolver_.Resolve(move);
}
@@ -788,7 +593,56 @@
void LCodeGen::DoCallStub(LCallStub* instr) {
- Abort("Unimplemented: %s", "DoCallStub");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpConstructResult: {
+ RegExpConstructResultStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCharAt: {
+ // TODO(1116): Add StringCharAt stub to x64.
+ Abort("Unimplemented: %s", "StringCharAt Stub");
+ break;
+ }
+ case CodeStub::MathPow: {
+ // TODO(1115): Add MathPow stub to x64.
+ Abort("Unimplemented: %s", "MathPow Stub");
+ break;
+ }
+ case CodeStub::NumberToString: {
+ NumberToStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringAdd: {
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::TranscendentalCache: {
+ TranscendentalCacheStub stub(instr->transcendental_type());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
@@ -803,24 +657,224 @@
void LCodeGen::DoDivI(LDivI* instr) {
- Abort("Unimplemented: %s", "DoDivI");}
+ LOperand* right = instr->InputAt(1);
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+
+ Register left_reg = rax;
+
+ // Check for x / 0.
+ Register right_reg = ToRegister(right);
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel left_not_zero;
+ __ testl(left_reg, left_reg);
+ __ j(not_zero, &left_not_zero);
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (-kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ NearLabel left_not_min_int;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &left_not_min_int);
+ __ cmpl(right_reg, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
+ // Sign extend to rdx.
+ __ cdq();
+ __ idivl(right_reg);
+
+ // Deoptimize if remainder is not 0.
+ __ testl(rdx, rdx);
+ DeoptimizeIf(not_zero, instr->environment());
+}
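The four bailouts in DoDivI map onto simple integer facts: division by zero, a JavaScript -0 from 0 / negative, the one int32 quotient that overflows, and a non-integral result. A plain C++ restatement of those guards (semantics only; note the minus-zero and overflow checks are conditional on hydrogen flags in the real code):

#include <climits>
#include <cstdint>
#include <cstdio>

// Returns false exactly where DoDivI would deoptimize (a semantic sketch).
bool Int32DivideOrBailout(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                      // x / 0
  if (left == 0 && right < 0) return false;          // 0 / -x is -0 in JS
  if (left == INT_MIN && right == -1) return false;  // quotient overflows
  if (left % right != 0) return false;               // result not an int32
  *result = left / right;
  return true;
}

int main() {
  int32_t r;
  printf("%s\n", Int32DivideOrBailout(6, 3, &r) ? "ok" : "deopt");  // ok
  printf("%s\n", Int32DivideOrBailout(7, 3, &r) ? "ok" : "deopt");  // deopt
}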
void LCodeGen::DoMulI(LMulI* instr) {
- Abort("Unimplemented: %s", "DoMultI");}
+ Register left = ToRegister(instr->InputAt(0));
+ LOperand* right = instr->InputAt(1);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ movl(kScratchRegister, left);
+ }
+
+ if (right->IsConstantOperand()) {
+ int right_value = ToInteger32(LConstantOperand::cast(right));
+ __ imull(left, left, Immediate(right_value));
+ } else if (right->IsStackSlot()) {
+ __ imull(left, ToOperand(right));
+ } else {
+ __ imull(left, ToRegister(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Bail out if the result is supposed to be negative zero.
+ NearLabel done;
+ __ testl(left, left);
+ __ j(not_zero, &done);
+ if (right->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else if (right->IsStackSlot()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ DeoptimizeIf(sign, instr->environment());
+ } else {
+ // Test the non-zero operand for negative sign.
+ __ or_(kScratchRegister, ToRegister(right));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ bind(&done);
+ }
+}
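The kScratchRegister save-and-or dance above implements one observation: a zero product is JavaScript's -0 exactly when one factor was negative, and negativity of either factor is just the sign bit of their bitwise OR. A hedged restatement:

#include <cstdint>
#include <cstdio>

// True where DoMulI's minus-zero path would deoptimize (sketch; the real
// code checks overflow separately before this point).
bool ProductIsMinusZero(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;
  return product == 0 && ((left | right) < 0);  // sign bit of either factor
}

int main() {
  printf("%d\n", ProductIsMinusZero(0, -5));  // 1: 0 * -5 == -0 in JS
  printf("%d\n", ProductIsMinusZero(0, 5));   // 0: plain +0
}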
void LCodeGen::DoBitI(LBitI* instr) {
- Abort("Unimplemented: %s", "DoBitI");}
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+
+ if (right->IsConstantOperand()) {
+ int right_operand = ToInteger32(LConstantOperand::cast(right));
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), Immediate(right_operand));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (right->IsStackSlot()) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToOperand(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ ASSERT(right->IsRegister());
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToRegister(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
void LCodeGen::DoShiftI(LShiftI* instr) {
- Abort("Unimplemented: %s", "DoShiftI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+ if (right->IsRegister()) {
+ ASSERT(ToRegister(right).is(rcx));
+
+ switch (instr->op()) {
+ case Token::SAR:
+ __ sarl_cl(ToRegister(left));
+ break;
+ case Token::SHR:
+ __ shrl_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ __ shll_cl(ToRegister(left));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int value = ToInteger32(LConstantOperand::cast(right));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sarl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ } else {
+ __ shrl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
}
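The odd-looking shift_count == 0 deopt in the SHR cases follows from JavaScript's unsigned shift: x >>> 0 produces a uint32, and only an unshifted value with the sign bit set can fail to fit back into an int32. A small check that makes the boundary explicit:

#include <cstdint>
#include <cstdio>

// True where the untagged SHR fast path must bail out (sketch).
bool ShrNeedsBailout(int32_t value, uint8_t shift_count) {
  uint32_t shifted = static_cast<uint32_t>(value) >> (shift_count & 0x1F);
  return shifted > 0x7FFFFFFFu;  // only reachable when shift_count == 0
}

int main() {
  printf("%d\n", ShrNeedsBailout(-1, 0));  // 1: 0xFFFFFFFF has no int32 form
  printf("%d\n", ShrNeedsBailout(-1, 1));  // 0: any real shift clears bit 31
}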
void LCodeGen::DoSubI(LSubI* instr) {
- Abort("Unimplemented: %s", "DoSubI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ subl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ subl(ToRegister(left), ToRegister(right));
+ } else {
+ __ subl(ToRegister(left), ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
@@ -854,18 +908,29 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
+ ASSERT(instr->result()->IsRegister());
__ Move(ToRegister(instr->result()), instr->value());
}
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, PixelArray::kLengthOffset));
}
@@ -875,12 +940,20 @@
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Abort("Unimplemented: %s", "DoBitNotI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->Equals(instr->result()));
+ __ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
+ __ push(ToRegister(instr->InputAt(0)));
+ CallRuntime(Runtime::kThrow, 1, instr);
+
+ if (FLAG_debug_code) {
+ Comment("Unreachable code.");
+ __ int3();
+ }
}
@@ -914,8 +987,7 @@
ASSERT(ToRegister(instr->InputAt(1)).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS);
- stub.SetArgsInRegisters();
+ TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -930,12 +1002,88 @@
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- Abort("Unimplemented: %s", "EmitBranch");
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ if (cc != always) {
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
+ }
+ }
}
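EmitBranch's three shapes exist purely to exploit fallthrough into the next emitted block. A toy model of the selection (block numbers are hypothetical):

#include <cstdio>

// Which jumps EmitBranch emits, given the next block to be generated.
void EmitBranchModel(int left, int right, int next_block) {
  if (left == right) {
    printf("goto B%d\n", left);                        // EmitGoto
  } else if (left == next_block) {
    printf("j<!cc> B%d; fall into B%d\n", right, left);
  } else if (right == next_block) {
    printf("j<cc> B%d; fall into B%d\n", left, right);
  } else {
    printf("j<cc> B%d; jmp B%d\n", left, right);       // no fallthrough
  }
}

int main() { EmitBranchModel(3, 7, /*next_block=*/3); }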
void LCodeGen::DoBranch(LBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Representation r = instr->hydrogen()->representation();
+ if (r.IsInteger32()) {
+ Register reg = ToRegister(instr->InputAt(0));
+ __ testl(reg, reg);
+ EmitBranch(true_block, false_block, not_zero);
+ } else if (r.IsDouble()) {
+ XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(reg, xmm0);
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->InputAt(0));
+ HType type = instr->hydrogen()->type();
+ if (type.IsBoolean()) {
+ __ Cmp(reg, Factory::true_value());
+ EmitBranch(true_block, false_block, equal);
+ } else if (type.IsSmi()) {
+ __ SmiCompare(reg, Smi::FromInt(0));
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ j(equal, false_label);
+ __ SmiCompare(reg, Smi::FromInt(0));
+ __ j(equal, false_label);
+ __ JumpIfSmi(reg, true_label);
+
+ // Test for double values. Plus/minus zero and NaN are false.
+ NearLabel call_stub;
+ __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_stub);
+
+ // HeapNumber => false iff +0, -0, or NaN. These three cases set the
+ // zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ j(zero, false_label);
+ __ jmp(true_label);
+
+ // The conversion stub doesn't cause garbage collections so it's
+ // safe to not record a safepoint after the call.
+ __ bind(&call_stub);
+ ToBooleanStub stub;
+ __ Pushad();
+ __ push(reg);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ Popad();
+ EmitBranch(true_block, false_block, not_zero);
+ }
+ }
}
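The falsy doubles called out in the comment (+0, -0, NaN) all set the zero flag under ucomisd against zero, which is what lets one flag test cover all three. The same predicate in portable C++:

#include <cmath>
#include <cstdio>

// The HeapNumber branch above, as a predicate: only +0, -0 and NaN are
// falsy doubles (IEEE comparison already treats -0.0 == 0.0).
bool DoubleIsTruthy(double v) {
  return v != 0.0 && !std::isnan(v);
}

int main() {
  printf("%d %d %d %d\n", DoubleIsTruthy(0.0), DoubleIsTruthy(-0.0),
         DoubleIsTruthy(NAN), DoubleIsTruthy(1.5));  // 0 0 0 1
}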
@@ -957,7 +1105,11 @@
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- Abort("Unimplemented: %s", "DoDeferredStackCheck");
+ __ Pushad();
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ __ Popad();
}
@@ -979,7 +1131,7 @@
}
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
switch (op) {
case Token::EQ:
@@ -1008,67 +1160,282 @@
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- Abort("Unimplemented: %s", "EmitCmpI");
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (left->IsRegister()) {
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(left), Immediate(value));
+ }
+ } else if (right->IsRegister()) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpl(ToRegister(left), ToOperand(right));
+ }
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("Unimplemented: %s", "DoCmpID");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+
+ NearLabel unordered;
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the unordered case, which produces a false value.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ NearLabel done;
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ j(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpIDAndBranch");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectEq");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ NearLabel different, done;
+ __ cmpq(left, right);
+ __ j(not_equal, &different);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&different);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmpq(left, right);
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoIsNull(LIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ // If the expression is known to be a smi, then it's
+ // definitely not null. Materialize false.
+ // Consider adding other type and representation tests too.
+ if (instr->hydrogen()->value()->type().IsSmi()) {
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ return;
+ }
+
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ if (instr->is_strict()) {
+ __ movl(result, Immediate(Heap::kTrueValueRootIndex));
+ NearLabel load;
+ __ j(equal, &load);
+ __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+ __ bind(&load);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ } else {
+ NearLabel true_value, false_value, done;
+ __ j(equal, &true_value);
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &true_value);
+ __ JumpIfSmi(reg, &false_value);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = result;
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &true_value);
+ __ bind(&false_value);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+ }
}
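The strict-equality path above avoids a diamond of LoadRoot calls by computing a root-table index first and materializing the boolean with a single indexed load off kRootRegister. A data-level sketch (an array stands in for the root table; the ordering is only illustrative):

#include <cstdio>

// Pick an index, then one indexed load yields the boolean object.
const char* kRootTable[] = {"true_value", "false_value"};  // stand-in
const int kTrueIndex = 0;
const int kFalseIndex = 1;

const char* MaterializeStrictIsNull(bool is_null) {
  int index = kTrueIndex;             // movl result, Immediate(kTrue...)
  if (!is_null) index = kFalseIndex;  // taken on the not-equal path
  return kRootTable[index];           // movq result, [root + index * 8]
}

int main() { printf("%s\n", MaterializeStrictIsNull(true)); }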
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsNullAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ // If the expression is known to be untagged or a smi, then it's
+ // definitely not null, and it can't be an undetectable object.
+ // Jump directly to the false block.
+ EmitGoto(false_block);
+ return;
+ }
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ Cmp(reg, Factory::null_value());
+ if (instr->is_strict()) {
+ EmitBranch(true_block, false_block, equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ __ j(equal, true_label);
+ __ Cmp(reg, Factory::undefined_value());
+ __ j(equal, true_label);
+ __ JumpIfSmi(reg, false_label);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+ }
}
Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("Unimplemented: %s", "EmitIsObject");
+ ASSERT(!input.is(kScratchRegister));
+
+ __ JumpIfSmi(input, is_not_object);
+
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ j(equal, is_object);
+
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, is_not_object);
+
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, is_not_object);
+ __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
+ __ j(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsObject(reg, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
+ LOperand* input_operand = instr->InputAt(0);
+ Register result = ToRegister(instr->result());
+ if (input_operand->IsRegister()) {
+ Register input = ToRegister(input_operand);
+ __ CheckSmiToIndicator(result, input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ __ CheckSmiToIndicator(result, input);
+ }
+ // result is zero if input is a smi, and one otherwise.
+ ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size,
+ Heap::kTrueValueRootIndex * kPointerSize));
}
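DoIsSmi leans on the same table trick plus the ASSERT above: CheckSmiToIndicator yields 0 for smis and 1 otherwise, and kFalseValueRootIndex sits right after kTrueValueRootIndex, so the indicator indexes straight into the root table. Modeled with illustrative index values:

#include <cassert>
#include <cstdio>

// Root-table indices as in the ASSERT: false must follow true.
const int kTrueValueRootIndex = 0;   // illustrative values
const int kFalseValueRootIndex = 1;
const char* kRootTable[] = {"true_value", "false_value"};

const char* IsSmiResult(int indicator) {  // 0 if smi, 1 otherwise
  assert(kFalseValueRootIndex == kTrueValueRootIndex + 1);
  return kRootTable[kTrueValueRootIndex + indicator];  // one indexed load
}

int main() { printf("%s %s\n", IsSmiResult(0), IsSmiResult(1)); }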
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Condition is_smi;
+ if (instr->InputAt(0)->IsRegister()) {
+ Register input = ToRegister(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ }
+ EmitBranch(true_block, false_block, is_smi);
+}
+
+
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return equal;
+ if (to == LAST_TYPE) return above_equal;
+ if (from == FIRST_TYPE) return below_equal;
+ UNREACHABLE();
+ return equal;
}
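Together the two helpers compress an instance-type interval [from, to] into one comparison plus a condition code. A usage sketch with stand-in type values (the real constants live in V8's instance-type enum):

#include <cassert>
#include <cstdio>

enum InstanceType { FIRST_TYPE = 0, JS_OBJECT_TYPE = 30, LAST_TYPE = 40 };

struct TypeCheck {
  InstanceType compare_against;
  const char* condition;
};

// Mirrors TestType + BranchCondition: only three interval shapes occur.
TypeCheck LowerIntervalToCheck(InstanceType from, InstanceType to) {
  if (from == to) return {from, "equal"};
  if (to == LAST_TYPE) return {from, "above_equal"};
  assert(from == FIRST_TYPE);
  return {to, "below_equal"};
}

int main() {
  TypeCheck c = LowerIntervalToCheck(JS_OBJECT_TYPE, LAST_TYPE);
  printf("cmp instance_type, %d; j_%s\n", c.compare_against, c.condition);
}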
@@ -1078,7 +1445,17 @@
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ JumpIfSmi(input, false_label);
+
+ __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
@@ -1089,34 +1466,118 @@
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ testl(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, not_equal);
}
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in the z flag.
+// Trashes the temp register and possibly input (if it and temp are aliased).
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
- Handle<String>class_name,
+ Handle<String> class_name,
Register input,
- Register temp,
- Register temp2) {
- Abort("Unimplemented: %s", "EmitClassOfTest");
+ Register temp) {
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ __ j(equal, is_true);
+ } else {
+ __ j(equal, is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ j(not_equal, is_true);
+ } else {
+ __ j(not_equal, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(temp, FieldOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are symbols it is sufficient to use an identity
+ // comparison.
+ ASSERT(class_name->IsSymbol());
+ __ Cmp(temp, class_name);
+ // End with the answer in the z flag.
}
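For orientation, the pointer chain EmitClassOfTest walks, reduced to plain structs — a sketch that omits the 'Function' special case handled above, with invented field names:

#include <cstdio>

// map -> constructor -> shared function info -> instance class name.
struct SharedFunctionInfo { const char* instance_class_name; };
struct Constructor { bool is_function; const SharedFunctionInfo* shared; };
struct Map { const Constructor* constructor; };

const char* ClassOf(const Map& map) {
  // Objects whose constructor is not a function have class 'Object'.
  if (!map.constructor->is_function) return "Object";
  return map.constructor->shared->instance_class_name;
}

int main() {
  SharedFunctionInfo shared = {"Array"};
  Constructor ctor = {true, &shared};
  Map map = {&ctor};
  printf("%s\n", ClassOf(map));  // Array
}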
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ NearLabel done;
+ Label is_true, is_false;
+
+ EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
+
+ __ j(not_equal, &is_false);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp);
+
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpMapAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+ int true_block = instr->true_block_id();
+ int false_block = instr->false_block_id();
+
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+ EmitBranch(true_block, false_block, equal);
}
@@ -1126,7 +1587,13 @@
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, zero);
}
@@ -1142,12 +1609,42 @@
void LCodeGen::DoCmpT(LCmpT* instr) {
- Abort("Unimplemented: %s", "DoCmpT");
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(condition, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpTAndBranch");
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ // The compare stub expects the compare condition and the input
+ // operands reversed for GT and LTE.
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, condition);
}
@@ -1160,17 +1657,46 @@
}
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((ParameterCount() + 1) * kPointerSize);
+ __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
}
void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
- Abort("Unimplemented: %s", "DoLoadGlobal");
+ Register result = ToRegister(instr->result());
+ if (result.is(rax)) {
+ __ load_rax(instr->hydrogen()->cell().location(),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ } else {
+ __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(result, Operand(result, 0));
+ }
+ if (instr->hydrogen()->check_hole_value()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
- Abort("Unimplemented: %s", "DoStoreGlobal");
+ Register value = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(temp));
+ bool check_hole = instr->hydrogen()->check_hole_value();
+ if (!check_hole && value.is(rax)) {
+ __ store_rax(instr->hydrogen()->cell().location(),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ return;
+ }
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ if (check_hole) {
+ __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ __ movq(Operand(temp, 0), value);
}
@@ -1180,22 +1706,93 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedField");
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+ } else {
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+ }
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ __ Move(rcx, instr->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ NearLabel non_instance;
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ movq(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ NearLabel done;
+ __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ NearLabel done;
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::fixed_cow_array_map());
+ __ Check(equal, "Check for fast elements failed.");
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadPixelArrayExternalPointer(
+ LLoadPixelArrayExternalPointer* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
}
@@ -1205,7 +1802,31 @@
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+ Register elements = ToRegister(instr->elements());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(elements));
+
+ // Load the result.
+ __ movq(result, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Check for the hole value.
+ __ Cmp(result, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
+ Register external_elements = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(external_elements));
+
+ // Load the result.
+ __ movzxbq(result, Operand(external_elements, key, times_1, 0));
}
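Pixel-array elements are raw clamped bytes, so the single movzxbq above is the whole load: one byte, zero-extended, already a valid untagged int32. The equivalent C++, as a sketch:

#include <cstdint>

// Matches Operand(external_elements, key, times_1, 0) with zero extension.
int32_t LoadPixelElement(const uint8_t* external_elements, intptr_t key) {
  return static_cast<int32_t>(external_elements[key]);  // always 0..255
}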
@@ -1230,29 +1851,88 @@
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- Abort("Unimplemented: %s", "DoPushArgument");
+ LOperand* argument = instr->InputAt(0);
+ if (argument->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(argument);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ push(Immediate(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else {
+ ASSERT(r.IsTagged());
+ __ Push(literal);
+ }
+ } else if (argument->IsRegister()) {
+ __ push(ToRegister(argument));
+ } else {
+ ASSERT(!argument->IsDoubleRegister());
+ __ push(ToOperand(argument));
+ }
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
+ Register result = ToRegister(instr->result());
+ __ movq(result, GlobalObjectOperand());
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Abort("Unimplemented: %s", "DoGlobalReceiver");
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallKnownFunction");
+ // Change context if needed.
+ bool change_context =
+ (graph()->info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ }
+
+ // Set rax to arguments count if adaptation is not needed. Assumes that rax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ Set(rax, arity);
+ }
+
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ // Invoke function.
+ if (*function == *graph()->info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Set up deoptimization.
+ RegisterLazyDeoptimization(instr);
+
+ // Restore context.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->function());
+ CallKnownFunction(instr->function(), instr->arity(), instr);
}
@@ -1317,7 +1997,13 @@
void LCodeGen::DoCallNamed(LCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -1327,17 +2013,29 @@
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallGlobal");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->target());
+ CallKnownFunction(instr->target(), instr->arity(), instr);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
- Abort("Unimplemented: %s", "DoCallNew");
+ ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Set(rax, instr->arity());
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -1347,7 +2045,32 @@
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedField");
+ Register object = ToRegister(instr->object());
+ Register value = ToRegister(instr->value());
+ int offset = instr->offset();
+
+ if (!instr->transition().is_null()) {
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ }
+
+ // Do the store.
+ if (instr->is_in_object()) {
+ __ movq(FieldOperand(object, offset), value);
+ if (instr->needs_write_barrier()) {
+ Register temp = ToRegister(instr->TempAt(0));
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWrite(object, offset, value, temp);
+ }
+ } else {
+ Register temp = ToRegister(instr->TempAt(0));
+ __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(temp, offset), value);
+ if (instr->needs_write_barrier()) {
+ // Update the write barrier for the properties array.
+ // object is used as a scratch register.
+ __ RecordWrite(temp, offset, value, object);
+ }
+ }
}
@@ -1357,12 +2080,43 @@
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
+ if (instr->length()->IsRegister()) {
+ __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
+ } else {
+ __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ DeoptimizeIf(above_equal, instr->environment());
}
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements, offset), value);
+ } else {
+ __ movq(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ value);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Compute address of modified element and store it into key register.
+ __ lea(key, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ RecordWrite(elements, key, value);
+ }
}
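RecordWrite needs the exact slot that was written, which is why the lea above recomputes the element address into the key register. The same arithmetic in plain C++, with kHeaderSize and kHeapObjectTag as assumed stand-ins for V8's constants:

#include <cstdint>

const uintptr_t kHeaderSize = 16;    // assumed FixedArray::kHeaderSize
const uintptr_t kHeapObjectTag = 1;  // assumed heap-object tag bit

// Address computed by FieldOperand(elements, key, times_pointer_size,
// kHeaderSize): untag the base, skip the header, index by pointer size.
uintptr_t ElementSlotAddress(uintptr_t tagged_elements, intptr_t key) {
  return tagged_elements - kHeapObjectTag + kHeaderSize +
         static_cast<uintptr_t>(key) * sizeof(void*);
}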
@@ -1372,54 +2126,186 @@
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Abort("Unimplemented: %s", "DoInteger32ToDouble");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoNumberTagI");
-}
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagI");
+ __ Integer32ToSmi(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoNumberTagD");
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->result());
+ Register tmp = ToRegister(instr->TempAt(0));
+
+ DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, deferred->entry());
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
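DeferredNumberTagD is an instance of the deferred-code pattern used throughout this file: the likely path (inline allocation) stays straight-line, the rare path is emitted out of line, and both rejoin at a shared exit. Schematically (pseudo-assembly in comments, not patch content):

// fast path:    AllocateHeapNumber(reg, tmp, deferred->entry())
//               falls through here on success
// bind(exit):   movsd [reg + HeapNumber::kValueOffset], input_reg
// ...out of line...
// bind(entry):  call Runtime::kAllocateHeapNumber  (slow allocation)
//               jmp exit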
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagD");
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Smi::FromInt(0));
+
+ __ PushSafepointRegisters();
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Ensure that value in rax survives popping registers.
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ movq(reg, kScratchRegister);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- Abort("Unimplemented: %s", "DoSmiTag");
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ Integer32ToSmi(input, input);
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Abort("Unimplemented: %s", "DoSmiUntag");
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ if (instr->needs_check()) {
+ Condition is_smi = __ CheckSmi(input);
+ DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ }
+ __ SmiToInteger32(input, input);
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
- Abort("Unimplemented: %s", "EmitNumberUntagD");
+ NearLabel load_smi, heap_number, done;
+
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi);
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &heap_number);
+
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorpd(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done);
+
+ // Heap number to XMM conversion.
+ __ bind(&heap_number);
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ // Smi to XMM conversion.
+ __ bind(&load_smi);
+ __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ bind(&done);
}
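The undefined-to-NaN conversion leans on IEEE 754: dividing zero by zero yields a quiet NaN, so xorpd (producing +0.0) followed by divsd manufactures NaN without loading a constant. A self-contained check:

#include <cassert>
#include <cmath>

int main() {
  volatile double zero = 0.0;       // volatile keeps the division at runtime
  assert(std::isnan(zero / zero));  // 0/0 -> quiet NaN per IEEE 754
  return 0;
}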
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+ NearLabel done, heap_number;
+ Register input_reg = ToRegister(instr->InputAt(0));
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+
+ if (instr->truncating()) {
+ __ j(equal, &heap_number);
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movl(input_reg, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&heap_number);
+
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2siq(input_reg, xmm0);
+ __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ __ cmpq(input_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(not_equal, instr->environment());
+
+ XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, xmm0);
+ __ cvtlsi2sd(xmm_temp, input_reg);
+ __ ucomisd(xmm0, xmm_temp);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(input_reg, input_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(input_reg, xmm0);
+ __ andl(input_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ }
+ __ bind(&done);
}
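In the truncating branch above, cvttsd2siq signals failure (NaN or out-of-range input) by producing the x86 "integer indefinite" value 0x8000000000000000; the 64-bit compare against kScratchRegister is what triggers the deopt. A software model of the behavior the check relies on:

#include <cmath>
#include <cstdint>

// A failed 64-bit truncation yields INT64_MIN (0x8000000000000000). The
// only legitimate input that also truncates to INT64_MIN is exactly -2^63,
// so deoptimizing on the sentinel is safe, at worst conservative.
int64_t TruncateOrIndefinite(double d) {
  if (std::isnan(d) || d >= 9223372036854775808.0 ||
      d < -9223372036854775808.0) {
    return INT64_MIN;  // the value the generated code deoptimizes on
  }
  return static_cast<int64_t>(d);
}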
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoTaggedToI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+ DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
}
@@ -1434,42 +2320,146 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckSmi");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Condition cc = masm()->CheckSmi(ToRegister(input));
+ if (instr->condition() != equal) {
+ cc = NegateCondition(cc);
+ }
+ DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ InstanceType first = instr->hydrogen()->first();
+ InstanceType last = instr->hydrogen()->last();
+
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+ // String has a dedicated bit in instance type.
+ __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(kIsNotStringMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(last)));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
}
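The three branches compile the [first, last] interval test down to at most two unsigned byte compares, with a one-bit shortcut when the interval is exactly the string types. The equivalent scalar logic, assuming first <= last and with last_type standing in for V8's LAST_TYPE:

#include <cstdint>

bool InstanceTypeInRange(uint8_t type, uint8_t first, uint8_t last,
                         uint8_t last_type) {
  if (first == last) return type == first;  // single-type equality case
  if (type < first) return false;           // 'below' -> deoptimize
  if (last == last_type) return true;       // upper bound check omitted
  return type <= last;                      // 'above' -> deoptimize
}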
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Abort("Unimplemented: %s", "DoCheckFunction");
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
+ __ Cmp(reg, instr->hydrogen()->target());
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
- Abort("Unimplemented: %s", "DoCheckMap");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ instr->hydrogen()->map());
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- Abort("Unimplemented: %s", "LoadHeapObject");
+ if (Heap::InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ Factory::NewJSGlobalPropertyCell(object);
+ __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(result, Operand(result, 0));
+ } else {
+ __ Move(result, object);
+ }
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+ Register reg = ToRegister(instr->TempAt(0));
+
+ Handle<JSObject> holder = instr->holder();
+ Handle<JSObject> current_prototype = instr->prototype();
+
+ // Load prototype object.
+ LoadHeapObject(reg, current_prototype);
+
+ // Check prototype maps up to the holder.
+ while (!current_prototype.is_identical_to(holder)) {
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current_prototype->map()));
+ DeoptimizeIf(not_equal, instr->environment());
+ current_prototype =
+ Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+ // Load next prototype object.
+ LoadHeapObject(reg, current_prototype);
+ }
+
+ // Check the holder map.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current_prototype->map()));
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
+ // Set up the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_elements());
+
+ // Pick the right runtime function or stub to call.
+ int length = instr->hydrogen()->length();
+ if (instr->hydrogen()->IsCopyOnWrite()) {
+ ASSERT(instr->hydrogen()->depth() == 1);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ } else {
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
+ // Set up the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_properties());
+ __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+
+ // Pick the right runtime function to call.
+ if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ }
}
@@ -1479,7 +2469,20 @@
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (shared_info->num_literals() == 0 && !pretenure) {
+ FastNewClosureStub stub;
+ __ Push(shared_info);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(rsi);
+ __ Push(shared_info);
+ __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
}
@@ -1493,8 +2496,67 @@
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register result = ToRegister(instr->result());
+ NearLabel true_label;
+ NearLabel done;
+
+ EmitIsConstructCall(result);
+ __ j(equal, &true_label);
+
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ NearLabel check_frame_marker;
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+}
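The helper chases at most two frame links: load the caller's frame pointer, hop over an arguments-adaptor frame if one is interposed, then compare the frame-marker slot. As plain pointer arithmetic (slot offsets and marker values are illustrative assumptions, not V8's actual StandardFrameConstants):

#include <cstddef>
#include <cstdint>

const ptrdiff_t kCallerFPSlot = 0;   // assumed word offsets within a frame
const ptrdiff_t kContextSlot = -1;
const ptrdiff_t kMarkerSlot = -2;

bool IsConstructCall(const intptr_t* fp, intptr_t adaptor_marker,
                     intptr_t construct_marker) {
  const intptr_t* caller =
      reinterpret_cast<const intptr_t*>(fp[kCallerFPSlot]);
  if (caller[kContextSlot] == adaptor_marker) {  // skip the adaptor frame
    caller = reinterpret_cast<const intptr_t*>(caller[kCallerFPSlot]);
  }
  return caller[kMarkerSlot] == construct_marker;
}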
+
+
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal());
+
+ EmitBranch(true_block, false_block, final_branch_condition);
}
@@ -1502,8 +2564,63 @@
Label* false_label,
Register input,
Handle<String> type_name) {
- Abort("Unimplemented: %s", "EmitTypeofIs");
- return no_condition;
+ Condition final_branch_condition = no_condition;
+ if (type_name->Equals(Heap::number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+ final_branch_condition = below;
+
+ } else if (type_name->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+ __ j(equal, true_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = not_zero;
+
+ } else if (type_name->Equals(Heap::function_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ final_branch_condition = above_equal;
+
+ } else if (type_name->Equals(Heap::object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ Cmp(input, Factory::null_value());
+ __ j(equal, true_label);
+ // Check for undetectable objects => false.
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ // Check for JS objects that are not RegExp or Function => true.
+ __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
+ final_branch_condition = below_equal;
+
+ } else {
+ final_branch_condition = never;
+ __ jmp(false_label);
+ }
+
+ return final_branch_condition;
}
@@ -1526,7 +2643,6 @@
void LCodeGen::DoStackCheck(LStackCheck* instr) {
// Perform stack overflow check.
NearLabel done;
- ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 8d1c5c4..6f8f06e 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -34,37 +34,15 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ int ToInteger32(LConstantOperand* op) const;
+ bool IsTaggedConstant(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op) const;
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -95,7 +87,6 @@
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
@@ -129,7 +120,6 @@
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -138,8 +128,7 @@
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary,
- Register temporary2);
+ Register temporary);
int StackSlotCount() const { return chunk()->spill_slot_count(); }
int ParameterCount() const { return scope()->num_parameters(); }
@@ -191,13 +180,6 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -210,6 +192,10 @@
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -232,11 +218,13 @@
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
new file mode 100644
index 0000000..cedd025
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -0,0 +1,320 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "x64/lithium-gap-resolver-x64.h"
+#include "x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack-allocated local. Recursion may allow
+ // multiple moves to be pending.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ moves_[index].Eliminate();
+ return;
+ }
+
+ // The move may be blocked on at most one pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
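A worked trace for the two-move cycle m0: r1 -> r2 and m1: r2 -> r1 (illustrative, not patch content):

// PerformMove(0): saves destination r2 and marks m0 pending, then recurses
//   into m1 because m1's source r2 blocks that destination.
// PerformMove(1): the only move blocking m1's destination r1 is the pending
//   m0, so EmitSwap(1) emits an xchg and rewrites m0's source from r1 to r2.
// Back in PerformMove(0): m0 is now r2 -> r2; its source equals its
//   destination, so it is eliminated. The whole cycle costs a single swap.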
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ Register src = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movq(dst, src);
+ }
+
+ } else if (source->IsStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ // Allow top 32 bits of an untagged Integer32 to be arbitrary.
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(xmm0, src);
+ __ movsd(cgen_->ToOperand(destination), xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Swap two general-purpose registers.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Swap a general-purpose register and a stack slot.
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ __ movq(kScratchRegister, mem);
+ __ movq(mem, reg);
+ __ movq(reg, kScratchRegister);
+
+ } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
+ // Swap two stack slots or two double stack slots.
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movq(kScratchRegister, dst);
+ __ movsd(dst, xmm0);
+ __ movq(src, kScratchRegister);
+
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // Swap two double registers.
+ XMMRegister source_reg = cgen_->ToDoubleRegister(source);
+ XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
+ __ movsd(xmm0, source_reg);
+ __ movsd(source_reg, destination_reg);
+ __ movsd(destination_reg, xmm0);
+
+ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ // Swap a double register and a double stack slot.
+ ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
+ XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+ ? source
+ : destination);
+ LOperand* other = source->IsDoubleRegister() ? destination : source;
+ ASSERT(other->IsDoubleStackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movsd(xmm0, other_operand);
+ __ movsd(other_operand, reg);
+ __ movsd(reg, xmm0);
+
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ moves_[index].Eliminate();
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs its source changed to
+ // reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-gap-resolver-x64.h b/src/x64/lithium-gap-resolver-x64.h
new file mode 100644
index 0000000..d828455
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.h
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5ef6eb7..a6afbf7 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
+#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
@@ -68,11 +69,33 @@
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs must use a fixed register.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -162,6 +185,12 @@
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -262,7 +291,8 @@
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
}
@@ -318,7 +348,7 @@
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
@@ -386,7 +416,7 @@
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -402,7 +432,6 @@
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -653,16 +682,16 @@
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
+ instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
+ instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
@@ -670,7 +699,10 @@
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -695,7 +727,7 @@
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -740,8 +772,72 @@
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- Abort("Unimplemented: %s", "DoBit");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+ ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+ HValue* right_value = instr->OperandAt(1);
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseFixed(right_value, rcx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool can_deopt = (op == Token::SHR && constant_value == 0);
+ if (can_deopt) {
+ bool can_truncate = true;
+ for (int i = 0; i < instr->uses()->length(); i++) {
+ if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+ can_truncate = false;
+ break;
+ }
+ }
+ can_deopt = !can_truncate;
+ }
+
+ LShiftI* result = new LShiftI(op, left, right, can_deopt);
+ return can_deopt
+ ? AssignEnvironment(DefineSameAsFirst(result))
+ : DefineSameAsFirst(result);
}
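The can_deopt rule reflects the value ranges involved: a logical shift right by one or more bits always produces a value below 2^31, so only a shift amount of zero can leave an unsigned result with no int32 representation. A sketch:

#include <cstdint>

// False is only reachable when shift == 0, which is why DoShift above
// requests a deopt environment solely for that case (unless every use
// truncates to int32 anyway).
bool ShrResultFitsInt32(uint32_t x, int shift /* 0..31 */) {
  uint32_t r = x >> shift;
  return r <= 0x7FFFFFFFu;
}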
@@ -836,7 +932,6 @@
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -847,26 +942,19 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch() && !instr->IsGoto()) {
- // TODO(fschneider): Handle branch instructions uniformly like
- // other instructions. This requires us to generate the right
- // branch instruction already at the HIR level.
+ if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
- HBranch* branch = HBranch::cast(current);
- instr->set_hydrogen_value(branch->value());
- HBasicBlock* first = branch->FirstSuccessor();
- HBasicBlock* second = branch->SecondSuccessor();
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -912,16 +1000,108 @@
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
- return NULL;
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+ HValue* v = instr->value();
+ if (v->EmitAtUses()) {
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ Token::Value op = compare->token();
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
+ } else {
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ bool reversed = op == Token::GT || op == Token::LTE;
+ LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
+ LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
+ LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+ right_operand);
+ return MarkAsCall(result, instr);
+ }
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+ temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsCompareJSObjectEq()) {
+ HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+ return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsInstanceOf()) {
+ HInstanceOf* instance_of = HInstanceOf::cast(v);
+ LInstanceOfAndBranch* result =
+ new LInstanceOfAndBranch(
+ UseFixed(instance_of->left(), InstanceofStub::left()),
+ UseFixed(instance_of->right(), InstanceofStub::right()));
+ return MarkAsCall(result, instr);
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else {
+ if (v->IsConstant()) {
+ if (HConstant::cast(v)->handle()->IsTrue()) {
+ return new LGoto(instr->FirstSuccessor()->block_id());
+ } else if (HConstant::cast(v)->handle()->IsFalse()) {
+ return new LGoto(instr->SecondSuccessor()->block_id());
+ }
+ }
+ Abort("Undefined compare before branch");
+ return NULL;
+ }
+ }
+ return new LBranch(UseRegisterAtStart(v));
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCompareMapAndBranch");
- return NULL;
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LCmpMapAndBranch(value);
}
@@ -957,27 +1137,37 @@
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- Abort("Unimplemented: %s", "DoPushArgument");
+ ++argument_count_;
+ LOperand* argument = UseOrConstant(instr->argument());
+ return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ Abort("Unimplemented: DoOuterContext");
return NULL;
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
- return NULL;
+ return DefineAsRegister(new LGlobalObject);
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- Abort("Unimplemented: %s", "DoGlobalReceiver");
- return NULL;
+ return DefineAsRegister(new LGlobalReceiver);
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
}
@@ -994,26 +1184,28 @@
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallGlobal");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- Abort("Unimplemented: %s", "DoCallNew");
- return NULL;
+ LOperand* constructor = UseFixed(instr->constructor(), rdi);
+ argument_count_ -= instr->argument_count();
+ LCallNew* result = new LCallNew(constructor);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1024,56 +1216,65 @@
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
}
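
// The recurring bookkeeping above: DoPushArgument bumps argument_count_ once
// per pushed argument, and each call lowering subtracts its argument_count(),
// so the counter nets to zero across a complete call sequence. Stand-alone
// sketch of the invariant (illustrative only, not V8 code):
#include <cassert>
int main() {
  int argument_count = 0;
  argument_count += 1;   // DoPushArgument: ++argument_count_;
  argument_count += 1;
  argument_count -= 2;   // DoCallRuntime etc.: argument_count_ -= argc
  assert(argument_count == 0);
  return 0;
}
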
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- Abort("Unimplemented: %s", "DoShr");
- return NULL;
+ return DoShift(Token::SHR, instr);
}
LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- Abort("Unimplemented: %s", "DoSar");
- return NULL;
+ return DoShift(Token::SAR, instr);
}
LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- Abort("Unimplemented: %s", "DoShl");
- return NULL;
+ return DoShift(Token::SHL, instr);
}
LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- Abort("Unimplemented: %s", "DoBitAnd");
- return NULL;
+ return DoBit(Token::BIT_AND, instr);
}
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- Abort("Unimplemented: %s", "DoBitNot");
- return NULL;
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new LBitNotI(input);
+ return DefineSameAsFirst(result);
}
LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- Abort("Unimplemented: %s", "DoBitOr");
- return NULL;
+ return DoBit(Token::BIT_OR, instr);
}
LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- Abort("Unimplemented: %s", "DoBitXor");
- return NULL;
+ return DoBit(Token::BIT_XOR, instr);
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- Abort("Unimplemented: %s", "DoDiv");
- return NULL;
+ if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else if (instr->representation().IsInteger32()) {
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* result = new LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(result, rax));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::DIV, instr);
+ }
}
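
// Why DoDiv pins the dividend to rax and reserves rdx via FixedTemp: x64 idiv
// reads the 128-bit dividend from rdx:rax and writes the quotient to rax and
// the remainder to rdx, so the divisor must live in any other register.
// Stand-alone sketch (GCC/Clang inline asm, x86-64 only; not V8 code):
#include <cstdio>
int main() {
  long long dividend = 7, divisor = -2, quotient, remainder;
  asm("cqto\n\t"       // sign-extend rax into rdx:rax
      "idivq %[d]"     // rdx:rax / divisor -> rax (quotient), rdx (remainder)
      : "=a"(quotient), "=&d"(remainder)
      : [d] "r"(divisor), "0"(dividend));
  printf("%lld %lld\n", quotient, remainder);  // -3 1 (truncates toward zero)
  return 0;
}
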
@@ -1084,14 +1285,40 @@
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- Abort("Unimplemented: %s", "DoMul");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LMulI* mul = new LMulI(left, right);
+ return AssignEnvironment(DefineSameAsFirst(mul));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::MUL, instr);
+ }
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- Abort("Unimplemented: %s", "DoSub");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new LSubI(left, right);
+ LInstruction* result = DefineSameAsFirst(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::SUB, instr);
+ }
}
@@ -1124,33 +1351,62 @@
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Abort("Unimplemented: %s", "DoCompare");
- return NULL;
+ Token::Value op = instr->token();
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+ LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
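
// The tagged path above swaps the fixed registers for GT and LTE, which is
// sound because a > b holds exactly when b < a, and a <= b exactly when
// b >= a; presumably this lets the compare code handle fewer operand orders.
// Quick check of the identity (illustrative only, not V8 code):
#include <cassert>
int main() {
  for (int a = -2; a <= 2; ++a) {
    for (int b = -2; b <= 2; ++b) {
      assert((a > b) == (b < a));
      assert((a <= b) == (b >= a));
    }
  }
  return 0;
}
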
LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCompareJSObjectEq");
- return NULL;
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsNull(value));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+
+ return DefineAsRegister(new LIsObject(value));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseAtStart(instr->value());
+
+ return DefineAsRegister(new LIsSmi(value));
}
@@ -1174,14 +1430,20 @@
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LPixelArrayLength(array));
}
@@ -1192,56 +1454,121 @@
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
+ return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+ Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
return NULL;
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
- return NULL;
+ LOperand* value = UseFixed(instr->value(), rax);
+ return MarkAsCall(new LThrow(value), instr);
}
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Abort("Unimplemented: %s", "DoChange");
+ Representation from = instr->from();
+ Representation to = instr->to();
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberUntagD* res = new LNumberUntagD(value);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ bool needs_check = !instr->value()->type().IsSmi();
+ if (needs_check) {
+ LOperand* xmm_temp =
+ (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ ? NULL
+ : FixedTemp(xmm1);
+ LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ } else {
+ return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new LNumberTagD(value, temp);
+ return AssignPointerMap(Define(result, result_temp));
+ } else {
+ ASSERT(to.IsInteger32());
+ bool needs_temp = instr->CanTruncateToInt32() &&
+ !CpuFeatures::IsSupported(SSE3);
+ LOperand* value = needs_temp ?
+ UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ }
+ } else if (from.IsInteger32()) {
+ if (to.IsTagged()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return DefineSameAsFirst(new LSmiTag(value));
+ } else {
+ LNumberTagI* result = new LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ }
+ }
+ UNREACHABLE();
return NULL;
}
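
// Background for the SmiTag/SmiUntag cases above, assuming the x64 encoding
// of this vintage: a smi keeps its 32-bit payload in the upper half of the
// word, so tagging is a shift left by 32 and untagging an arithmetic shift
// right, and the low tag bit stays 0. Sketch (illustrative only, not V8 code):
#include <cstdint>
#include <cstdio>
int main() {
  int32_t n = -7;
  uint64_t tagged = static_cast<uint64_t>(static_cast<int64_t>(n)) << 32;
  int32_t untagged = static_cast<int32_t>(static_cast<int64_t>(tagged) >> 32);
  printf("%d %d\n", untagged, static_cast<int>(tagged & 1));  // -7 0
  return 0;
}
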
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckNonSmi");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value, zero));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LCheckInstanceType* result = new LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
- return NULL;
+ LOperand* temp = TempRegister();
+ LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+ return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckSmi");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value, not_zero));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- Abort("Unimplemented: %s", "DoCheckFunction");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- Abort("Unimplemented: %s", "DoCheckMap");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LCheckMap* result = new LCheckMap(value);
+ return AssignEnvironment(result);
}
@@ -1253,14 +1580,12 @@
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- int32_t value = instr->Integer32Value();
- return DefineAsRegister(new LConstantI(value));
+ return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
- double value = instr->DoubleValue();
LOperand* temp = TempRegister();
- return DefineAsRegister(new LConstantD(value, temp));
+ return DefineAsRegister(new LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT(instr->handle()));
+ return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1269,14 +1594,17 @@
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- Abort("Unimplemented: %s", "DoLoadGlobal");
- return NULL;
+ LLoadGlobal* result = new LLoadGlobal;
+ return instr->check_hole_value()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- Abort("Unimplemented: %s", "DoStoreGlobal");
- return NULL;
+ LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
+ TempRegister());
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
@@ -1286,35 +1614,67 @@
}
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedField");
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ Abort("Unimplemented: DoStoreContextSlot");
return NULL;
}
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ ASSERT(instr->representation().IsTagged());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new LLoadNamedField(obj));
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rax);
+ LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
- Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
- return NULL;
+ return AssignEnvironment(DefineAsRegister(
+ new LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
- return NULL;
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
+ HLoadPixelArrayExternalPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
- return NULL;
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
+ HLoadPixelArrayElement* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer =
+ UseRegisterAtStart(instr->external_pointer());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadPixelArrayElement* result =
+ new LLoadPixelArrayElement(external_pointer, key);
+ return DefineSameAsFirst(result);
}
@@ -1326,8 +1686,20 @@
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
- return NULL;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+
+ return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
}
@@ -1338,8 +1710,22 @@
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedField");
- return NULL;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = needs_write_barrier
+ ? UseTempRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+
+ // We only need a scratch register if we have a write barrier or we
+ // have a store into the properties array (not in-object-property).
+ LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+ ? TempRegister() : NULL;
+
+ return new LStoreNamedField(obj, val, temp);
}
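
// Context for NeedsWriteBarrier above: a generational collector must learn
// about old-to-new pointers created by stores, which is why barriered stores
// claim extra temp registers. Toy remembered-set sketch (made-up names, not
// V8 code):
#include <cstddef>
#include <cstdio>
struct Obj { bool young; Obj* field; };
void RecordSlot(Obj** slot) { printf("remembered %p\n", (void*)slot); }
void StoreWithBarrier(Obj* holder, Obj* value) {
  holder->field = value;
  if (!holder->young && value != NULL && value->young) {
    RecordSlot(&holder->field);  // the GC rescans this slot at scavenge time
  }
}
int main() {
  Obj old_obj = { false, NULL };
  Obj young_obj = { true, NULL };
  StoreWithBarrier(&old_obj, &young_obj);   // old -> young: barrier fires
  StoreWithBarrier(&young_obj, &old_obj);   // young -> old: no barrier needed
  return 0;
}
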
@@ -1349,18 +1735,28 @@
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ Abort("Unimplemented: %s", "DoStringCharCodeAt");
return NULL;
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ Abort("Unimplemented: %s", "DoStringLength");
return NULL;
}
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
Abort("Unimplemented: %s", "DoRegExpLiteral");
return NULL;
@@ -1368,8 +1764,7 @@
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
}
@@ -1398,8 +1793,8 @@
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- Abort("Unimplemented: %s", "DoCallStub");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
}
@@ -1426,6 +1821,12 @@
return NULL;
}
+
+LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
+ return DefineAsRegister(new LIsConstructCall);
+}
+
+
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
@@ -1448,7 +1849,7 @@
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
LLazyBailout* lazy_bailout = new LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- instructions_pending_deoptimization_environment_->
+ instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
return result;
@@ -1464,13 +1865,21 @@
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- Abort("Unimplemented: %s", "DoEnterInlined");
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->function(),
+ false,
+ undefined);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
return NULL;
}
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- Abort("Unimplemented: %s", "DoLeaveInlined");
+ HEnvironment* outer = current_block_->last_environment()->outer();
+ current_block_->UpdateEnvironment(outer);
return NULL;
}
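
// DoEnterInlined/DoLeaveInlined above keep environments nested like a stack:
// entering an inlined function installs a copy chained to the caller's
// environment, and leaving restores the outer one. Skeleton of that
// discipline (illustrative only, not V8 code):
#include <cassert>
#include <cstddef>
struct EnvLike { EnvLike* outer; };
int main() {
  EnvLike caller = { NULL };
  EnvLike inlined = { &caller };  // EnterInlined: copy chained to the caller
  EnvLike* current = &inlined;
  current = current->outer;       // LeaveInlined: back to the caller's env
  assert(current == &caller);
  return 0;
}
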
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 17d9dda..0cb5cc7 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -39,119 +39,8 @@
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
- V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@@ -195,6 +84,7 @@
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -232,6 +122,8 @@
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
+ V(LoadPixelArrayElement) \
+ V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -240,6 +132,7 @@
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
+ V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -259,6 +152,8 @@
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@@ -287,7 +182,11 @@
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
+
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -304,16 +203,14 @@
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -327,41 +224,73 @@
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
- for (int i = 0; i < N; i++) elems_[i] = NULL;
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
- int length() { return N; }
- T& operator[](int i) {
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
- T elems_[N];
+ ElementType elems_[NumElements];
};
-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
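
// Why the zero-length specialization above exists: standard C++ forbids
// zero-length arrays, so the empty container drops the storage entirely and
// keeps operator[] only so generic code still compiles; it must never be
// called (hence UNREACHABLE, with the static dummy merely satisfying the
// return type). The pattern distilled (illustrative only, not V8 code):
template <typename T, int N>
struct Container {
  T elems[N];  // would be ill-formed if N were 0, hence the specialization
  int length() const { return N; }
};
template <typename T>
struct Container<T, 0> {
  int length() const { return 0; }
};
int main() {
  Container<int*, 2> two;
  Container<int*, 0> none;
  return two.length() + none.length() - 2;  // exits 0
}
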
-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
@@ -512,7 +441,7 @@
};
-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
@@ -570,7 +499,7 @@
};
-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -614,12 +543,11 @@
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI: public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
@@ -627,7 +555,7 @@
};
-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -644,7 +572,7 @@
};
-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -663,7 +591,7 @@
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUnaryMathOperation(LOperand* value) {
inputs_[0] = value;
@@ -677,7 +605,7 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -688,7 +616,7 @@
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -700,7 +628,7 @@
};
-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
@@ -729,23 +657,20 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -754,7 +679,7 @@
};
-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
@@ -765,7 +690,7 @@
};
-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -777,7 +702,7 @@
};
-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -788,11 +713,10 @@
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
@@ -803,7 +727,7 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -814,7 +738,7 @@
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -840,12 +764,11 @@
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -856,7 +779,7 @@
};
-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -870,7 +793,7 @@
};
-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -884,7 +807,7 @@
};
-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -895,7 +818,7 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -935,7 +858,7 @@
};
-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -952,7 +875,7 @@
};
-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -972,7 +895,7 @@
};
-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -984,51 +907,37 @@
};
-template <int temp_count>
-class LConstant: public LTemplateInstruction<1, 0, temp_count> {
- DECLARE_INSTRUCTION(Constant)
-};
-
-
-class LConstantI: public LConstant<0> {
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantI(int32_t value) : value_(value) { }
- int32_t value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- int32_t value_;
+ int32_t value() const { return hydrogen()->Integer32Value(); }
};
-class LConstantD: public LConstant<1> {
+class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
- explicit LConstantD(double value, LOperand* temp) : value_(value) {
+ explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
}
- double value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- double value_;
+ double value() const { return hydrogen()->DoubleValue(); }
};
-class LConstantT: public LConstant<0> {
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantT(Handle<Object> value) : value_(value) { }
- Handle<Object> value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- Handle<Object> value_;
+ Handle<Object> value() const { return hydrogen()->handle(); }
};
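
// The three constant classes above stop caching a copy of the value and read
// it back from the originating hydrogen instruction via the accessor macro.
// Shape of that delegation, reduced to a sketch (made-up names, not V8 code):
#include <cstdint>
struct HConstantLike { int32_t int32_value; };
struct LConstantILike {
  const HConstantLike* hydrogen_;  // set when the chunk builder lowers it
  int32_t value() const { return hydrogen_->int32_value; }
};
int main() {
  HConstantLike h = { 42 };
  LConstantILike l = { &h };
  return l.value() == 42 ? 0 : 1;  // one source of truth: the hydrogen node
}
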
-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1041,28 +950,28 @@
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
};
-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1073,7 +982,18 @@
};
-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LPixelArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+};
+
+
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1096,7 +1016,7 @@
};
-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1106,7 +1026,7 @@
};
-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
@@ -1116,7 +1036,7 @@
};
-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1128,7 +1048,7 @@
};
-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1140,7 +1060,7 @@
};
-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1158,7 +1078,7 @@
};
-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1176,7 +1096,7 @@
};
-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
@@ -1186,7 +1106,7 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1197,7 +1117,7 @@
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1211,11 +1131,10 @@
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
@@ -1225,7 +1144,7 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
@@ -1235,7 +1154,18 @@
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+ "load-pixel-array-external-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1250,7 +1180,23 @@
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+ "load-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1271,10 +1217,11 @@
};
-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ explicit LStoreGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
@@ -1282,19 +1229,23 @@
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1304,6 +1255,12 @@
};
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1328,10 +1285,10 @@
};
-class LCallKeyed: public LTemplateInstruction<1, 0, 1> {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallKeyed(LOperand* temp) {
- temps_[0] = temp;
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
@@ -1388,7 +1345,7 @@
};
-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1413,7 +1370,7 @@
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1423,7 +1380,7 @@
};
-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1474,7 +1431,7 @@
};
-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1484,7 +1441,7 @@
};
-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1494,7 +1451,7 @@
};
-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1593,7 +1550,7 @@
};
-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
@@ -1604,11 +1561,10 @@
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
+ explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
@@ -1616,7 +1572,7 @@
};
-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
@@ -1641,7 +1597,7 @@
};
-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
@@ -1690,7 +1646,7 @@
};
-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -1700,7 +1656,7 @@
};
-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
@@ -1715,7 +1671,7 @@
};
-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1730,7 +1686,25 @@
};
-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1783,7 +1757,7 @@
pointer_maps_(8),
inlined_closures_(1) { }
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
@@ -1849,7 +1823,7 @@
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
- instructions_pending_deoptimization_environment_(NULL),
+ instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
@@ -1900,30 +1874,30 @@
MUST_USE_RESULT LOperand* UseRegister(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
- // A value in a register that may be trashed.
+ // An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
- // An operand value in a register or stack slot.
+ // An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
- // An operand value in a register, stack slot or a constant operand.
+ // An input operand in a register, stack slot or a constant operand.
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
- // An operand value in a register or a constant operand.
+ // An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
- // An operand value in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- LOperand* UseAny(HValue* value);
-
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
template<int I, int T>
@@ -1983,7 +1957,7 @@
int argument_count_;
LAllocator* allocator_;
int position_;
- LInstruction* instructions_pending_deoptimization_environment_;
+ LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f95755d..56a2d6f 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -68,7 +68,9 @@
}
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
}
@@ -375,6 +377,16 @@
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ Runtime::Function* function = Runtime::FunctionForId(id);
+ Set(rax, function->nargs);
+ movq(rbx, ExternalReference(function));
+ CEntryStub ces(1);
+ ces.SaveDoubles();
+ CallStub(&ces);
+}
+
+
MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
int num_arguments) {
return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -885,6 +897,13 @@
}
+Condition MacroAssembler::CheckSmi(const Operand& src) {
+ ASSERT_EQ(0, kSmiTag);
+ testb(src, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
// Make mask 0x8000000000000001 and test that both bits are zero.
@@ -960,6 +979,27 @@
}
+void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
+ if (dst.is(src)) {
+ andl(dst, Immediate(kSmiTagMask));
+ } else {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ }
+}
+
+
+void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
+ if (!(src.AddressUsesRegister(dst))) {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ } else {
+ movl(dst, src);
+ andl(dst, Immediate(kSmiTagMask));
+ }
+}
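
// Both helpers above compute src & kSmiTagMask: heap object pointers carry a
// 1 in the low bit while smis keep it 0, so the AND yields exactly the 0/1
// indicator described in the header. Stand-alone sketch (illustrative only,
// not V8 code):
#include <cstdint>
#include <cstdio>
int main() {
  const uint64_t kSmiTagMask = 1;                  // assumed: tag is bit 0
  uint64_t smi = static_cast<uint64_t>(42) << 32;  // payload high, tag bit 0
  uint64_t heap_object = 0x100001;                 // tagged ptr: low bit set
  printf("%d %d\n", static_cast<int>(smi & kSmiTagMask),
         static_cast<int>(heap_object & kSmiTagMask));  // 0 1
  return 0;
}
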
+
+
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1386,6 +1426,68 @@
}
+void MacroAssembler::Pushad() {
+ push(rax);
+ push(rcx);
+ push(rdx);
+ push(rbx);
+ // Not pushing rsp or rbp.
+ push(rsi);
+ push(rdi);
+ push(r8);
+ push(r9);
+ // r10 is kScratchRegister.
+ push(r11);
+ push(r12);
+ // r13 is kRootRegister.
+ push(r14);
+ // r15 is kSmiConstantRegister.
+}
+
+
+void MacroAssembler::Popad() {
+ pop(r14);
+ pop(r12);
+ pop(r11);
+ pop(r9);
+ pop(r8);
+ pop(rdi);
+ pop(rsi);
+ pop(rbx);
+ pop(rdx);
+ pop(rcx);
+ pop(rax);
+}
+
+
+void MacroAssembler::Dropad() {
+ const int kRegistersPushedByPushad = 11;
+ addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize));
+}
+
+
+// Order in which general-purpose registers are pushed by Pushad:
+// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+ 0,
+ 1,
+ 2,
+ 3,
+ -1,
+ -1,
+ 4,
+ 5,
+ 6,
+ 7,
+ -1,
+ 8,
+ 9,
+ -1,
+ 10,
+ -1
+};
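
// Cross-checking the table above: codes 0..15 are rax, rcx, rdx, rbx, rsp,
// rbp, rsi, rdi, r8..r15; Pushad skips rsp, rbp, r10 (scratch), r13 (root)
// and r15 (smi constant), and the remaining eleven registers get consecutive
// indices in push order. Stand-alone recomputation (not V8 code):
#include <cassert>
int main() {
  bool skipped[16] = { false, false, false, false, true,  true,  false, false,
                       false, false, true,  false, false, true,  false, true };
  int table[16];
  int next = 0;
  for (int code = 0; code < 16; ++code) {
    table[code] = skipped[code] ? -1 : next++;
  }
  assert(table[0] == 0);    // rax is pushed first
  assert(table[14] == 10);  // r14 is the eleventh and last register pushed
  assert(table[4] == -1);   // rsp is never pushed
  return 0;
}
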
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -1439,6 +1541,18 @@
}
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+ // The ret instruction can only encode a 16-bit immediate byte count.
+ if (is_uint16(bytes_dropped)) {
+ ret(bytes_dropped);
+ } else {
+ pop(scratch);
+ addq(rsp, Immediate(bytes_dropped));
+ push(scratch);
+ ret(0);
+ }
+}
+
+
void MacroAssembler::FCmp() {
fucomip();
fstp(0);
@@ -1670,10 +1784,18 @@
Move(rdi, Handle<JSFunction>(function));
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ if (V8::UseCrankshaft()) {
+ // Since Crankshaft can recompile a function, we need to load
+ // the Code object every time we call the function.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(rdx, expected, actual, flag);
+ } else {
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ }
}
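
// Why the Crankshaft path reloads the code entry on every call: optimizing
// (or deoptimizing) a function swaps its machine code after call sites have
// been emitted, so a code pointer cached at emit time can go stale. Reduced
// illustration (made-up names, not V8 code):
#include <cstdio>
struct JSFunctionLike { void (*code_entry)(); };
void Unoptimized() { puts("unoptimized"); }
void Optimized() { puts("optimized"); }
int main() {
  JSFunctionLike f = { Unoptimized };
  void (*cached)() = f.code_entry;  // old path: bake the entry in once
  f.code_entry = Optimized;         // recompilation replaces the code
  cached();                         // stale entry: prints "unoptimized"
  f.code_entry();                   // fresh load:  prints "optimized"
  return 0;
}
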
@@ -1734,12 +1856,24 @@
}
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) {
+void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
+ bool save_doubles) {
#ifdef _WIN64
- const int kShaddowSpace = 4;
- arg_stack_space += kShaddowSpace;
+ const int kShadowSpace = 4;
+ arg_stack_space += kShadowSpace;
#endif
- if (arg_stack_space > 0) {
+ // Optionally save all XMM registers.
+ if (save_doubles) {
+ CpuFeatures::Scope scope(SSE2);
+ int space = XMMRegister::kNumRegisters * kDoubleSize +
+ arg_stack_space * kPointerSize;
+ subq(rsp, Immediate(space));
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ }
+ } else if (arg_stack_space > 0) {
subq(rsp, Immediate(arg_stack_space * kPointerSize));
}
@@ -1756,7 +1890,7 @@
}
-void MacroAssembler::EnterExitFrame(int arg_stack_space) {
+void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
@@ -1764,25 +1898,31 @@
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, false);
}
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Registers:
// r12 : argv
-
+ if (save_doubles) {
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ }
+ }
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
- // Pop everything up to and including the arguments and the receiver
+ // Drop everything up to and including the arguments and the receiver
// from the caller stack.
lea(rsp, Operand(r12, 1 * kPointerSize));
@@ -1970,11 +2110,11 @@
Register top_reg = result_end.is_valid() ? result_end : result;
- if (top_reg.is(result)) {
- addq(top_reg, Immediate(object_size));
- } else {
- lea(top_reg, Operand(result, object_size));
+ if (!top_reg.is(result)) {
+ movq(top_reg, result);
}
+ addq(top_reg, Immediate(object_size));
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(top_reg, Operand(kScratchRegister, 0));
j(above, gc_required);
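
// The allocation changes above replace lea with mov+add because lea computes
// the address without setting flags, so a wrapping size went undetected; add
// sets the carry flag and lets j(carry, gc_required) catch the overflow. The
// same check in portable C++ (illustrative only, not V8 code):
#include <cstdint>
#include <cstdio>
int main() {
  uint64_t top = UINT64_MAX - 8;          // allocation top near address limit
  uint64_t object_size = 32;
  uint64_t new_top = top + object_size;   // wraps around
  bool carry = new_top < top;             // mirrors CF after the x64 add
  printf("%d\n", static_cast<int>(carry));  // 1 -> branch to gc_required
  return 0;
}
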
@@ -2024,7 +2164,12 @@
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
- lea(result_end, Operand(result, element_count, element_size, header_size));
+
+ // We assume that element_count*element_size + header_size does not
+ // overflow.
+ lea(result_end, Operand(element_count, element_size, header_size));
+ addq(result_end, result);
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
@@ -2070,6 +2215,7 @@
movq(result_end, object_size);
}
addq(result_end, result);
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 30b9ba5..1002635 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,7 @@
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(Operand with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
void StoreRoot(Register source, Heap::RootListIndex index);
@@ -152,7 +152,7 @@
//
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0);
+ void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
// Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
// memory (not GCed) on the stack accessible via StackSpaceOperand.
@@ -161,20 +161,20 @@
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame();
+ void LeaveExitFrame(bool save_doubles = false);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
void LeaveApiExitFrame();
// Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { UNIMPLEMENTED(); }
- void PopSafepointRegisters() { UNIMPLEMENTED(); }
+ void PushSafepointRegisters() { Pushad(); }
+ void PopSafepointRegisters() { Popad(); }
static int SafepointRegisterStackIndex(int reg_code) {
- UNIMPLEMENTED();
- return 0;
+ return kSafepointPushRegisterIndices[reg_code];
}
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -272,6 +272,7 @@
// Is the value a tagged smi.
Condition CheckSmi(Register src);
+ Condition CheckSmi(const Operand& src);
// Is the value a non-negative tagged smi.
Condition CheckNonNegativeSmi(Register src);
@@ -300,6 +301,11 @@
// conversion to a smi.
Condition CheckUInteger32ValidSmiValue(Register src);
+ // Check whether src is a Smi, and set dst to zero if it is a smi,
+ // and to one if it isn't.
+ void CheckSmiToIndicator(Register dst, Register src);
+ void CheckSmiToIndicator(Register dst, const Operand& src);
+
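As a cross-check on CheckSmiToIndicator's contract, a C++ sketch of the indicator it computes, assuming V8's usual smi tagging in which a smi has a low tag bit of 0:

    // Returns 0 if value is a smi, 1 otherwise.
    int SmiIndicator(intptr_t value) {
      const intptr_t kSmiTagMask = 1;
      return static_cast<int>(value & kSmiTagMask);
    }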
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
@@ -534,6 +540,14 @@
// ---------------------------------------------------------------------------
// String macros.
+
+ // If object is a string, its map is loaded into object_map.
+ template <typename LabelType>
+ void JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string);
+
+
template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
@@ -590,6 +604,22 @@
void Call(ExternalReference ext);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Emit call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ Call(self, RelocInfo::CODE_TARGET);
+ }
+
+ // Non-x64 instructions.
+ // Push/pop all general purpose registers.
+ // Does not push rsp/rbp nor any of the assembler's special purpose registers
+ // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+ void Pushad();
+ void Popad();
+ // Sets the stack as after performing Popad, without actually loading the
+ // registers.
+ void Dropad();
+
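A usage sketch for the Pushad/Popad/Dropad trio, consistent with the comments above (the registers and the GC-triggering call are illustrative):

    // __ Pushad();   // spill the pointer-holding registers
    // ... emit a call that may allocate or trigger GC ...
    // __ Popad();    // reload them; or
    // __ Dropad();   // just unwind rsp without reloading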
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
@@ -804,6 +834,9 @@
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
+ // Call a runtime function and save the value of XMM registers.
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
@@ -887,6 +920,10 @@
void Ret();
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
Handle<Object> CodeObject() { return code_object_; }
@@ -923,6 +960,9 @@
bool allow_stub_calls() { return allow_stub_calls_; }
private:
+ // Order general registers are pushed by Pushad.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+ static int kSafepointPushRegisterIndices[Register::kNumRegisters];
bool generating_stub_;
bool allow_stub_calls_;
@@ -953,7 +993,7 @@
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space);
+ void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
void LeaveExitFrameEpilogue();
@@ -1436,6 +1476,8 @@
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
NearLabel result_ok;
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
@@ -1570,6 +1612,17 @@
template <typename LabelType>
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string);
+}
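A plausible call-site sketch for JumpIfNotString; the registers and label are illustrative, not taken from a caller in this patch:

    // NearLabel not_string;
    // __ JumpIfNotString(rax, rbx, &not_string);  // rbx receives the map
    // ... string fast path, free to reuse the map in rbx ...
    // __ bind(&not_string);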
+
+
+template <typename LabelType>
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8c1b579..9cb88f3 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -307,28 +307,32 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss) {
+ Label* miss,
+ bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss,
+ support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
+ }
}
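The support_wrappers flag distinguishes the two receivers this stub can see. As a JS-level illustration (a sketch only, borrowing the cctest CompileRun helper):

    // CompileRun("'abc'.length;");              // primitive string: direct read
    // CompileRun("new String('abc').length;");  // JSValue wrapper: unwrap first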
@@ -437,10 +441,9 @@
// Generates call to API function.
-static bool GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- Failure** failure) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : object passing the type check
@@ -504,13 +507,8 @@
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result =
- masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
- return true;
+ return masm->TryCallApiFunctionAndReturn(&fun,
+ argc + kFastApiCallArguments + 1);
}
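With GenerateFastApiCall now returning MaybeObject* directly, the bool-plus-Failure** out-parameter protocol collapses into the propagation idiom used at every call site below:

    // MaybeObject* result = GenerateFastApiCall(masm, optimization, argc);
    // if (result->IsFailure()) return result;  // retry-after-GC failure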
@@ -523,17 +521,16 @@
arguments_(arguments),
name_(name) {}
- bool Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss,
- Failure** failure) {
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -553,8 +550,7 @@
lookup,
name,
optimization,
- miss,
- failure);
+ miss);
} else {
CompileRegular(masm,
object,
@@ -565,23 +561,22 @@
name,
holder,
miss);
- return true;
+ return Heap::undefined_value(); // Success.
}
}
private:
- bool CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label,
- Failure** failure) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -643,13 +638,10 @@
// Invoke function.
if (can_do_fast_api_call) {
- bool success = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate(),
- failure);
- if (!success) {
- return false;
- }
+ MaybeObject* result = GenerateFastApiCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -668,7 +660,7 @@
FreeSpaceForFastApiCall(masm, scratch1);
}
- return true;
+ return Heap::undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -1021,17 +1013,16 @@
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@@ -1095,12 +1086,7 @@
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
- return true;
+ return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
}
@@ -2135,17 +2121,14 @@
}
if (depth != kInvalidProtoDepth) {
- Failure* failure;
// Move the return address on top of the stack.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
// rsp[2 * kPointerSize] is uninitialized, rsp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2194,21 +2177,17 @@
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), rcx);
- Failure* failure;
- bool success = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss,
- &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ rdx,
+ rbx,
+ rdi,
+ rax,
+ &miss);
+ if (result->IsFailure()) return result;
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2459,9 +2438,17 @@
Handle<Map>(object->map()));
__ j(not_equal, &miss);
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ Heap::kTheHoleValueRootIndex);
+ __ j(equal, &miss);
+
// Store the value in the cell.
- __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
- __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
__ IncrementCounter(&Counters::named_store_global_inline, 1);
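The hole check added above guards the delete-then-reassign sequence, in which a deleted global's property cell still exists but holds the hole. A JS sketch of the case (illustrative, via the cctest CompileRun helper):

    // CompileRun("this.x = 1;");     // creates the global's property cell
    // CompileRun("delete this.x;");  // cell now holds the hole
    // CompileRun("x = 2;");          // must miss to the runtime so the
    //                                // property details get refreshed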
@@ -2648,12 +2635,11 @@
// -----------------------------------
Label miss;
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
+ rdi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2812,12 +2798,11 @@
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
+ rcx, rdi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2933,7 +2918,7 @@
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
+ GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3013,6 +2998,35 @@
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ rdx,
+ rax,
+ rbx,
+ rcx,
+ rax,
+ &miss,
+ &miss,
+ &miss);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
@@ -3144,6 +3158,306 @@
}
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &slow);
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. The map is already in rdx.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: index (as a smi)
+ // rdx: JSObject
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // rax: index (as a smi)
+ // rdx: receiver (JSObject)
+ // rcx: untagged index
+ // rbx: elements array
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalIntArray:
+ __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalUnsignedIntArray:
+ __ movl(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // rax: index
+ // rdx: receiver
+ // For integer array types:
+ // rcx: value
+ // For floating-point array type:
+ // xmm0: value as double.
+
+ ASSERT(kSmiValueSize == 32);
+ if (array_type == kExternalUnsignedIntArray) {
+ // For the UnsignedInt array type, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ NearLabel box_int;
+
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ // The value is zero-extended since we loaded the value from memory
+ // with movl.
+ __ cvtqsi2sd(xmm0, rcx);
+
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+ }
+
+ // Slow case: Jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rax); // name
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Return the generated code.
+ return GetCode(flags);
+}
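The load switch above selects a sign-extending, zero-extending, or float-widening load per element type. A plain C++ sketch of the same semantics, with hypothetical helper names:

    int32_t  LoadInt8(const int8_t* base, int i)   { return base[i]; }  // movsxbq
    uint32_t LoadUint8(const uint8_t* base, int i) { return base[i]; }  // movzxbq
    int32_t  LoadInt16(const int16_t* base, int i) { return base[i]; }  // movsxwq
    double   LoadFloat32(const float* base, int i) { return base[i]; }  // cvtss2sd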
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+ // Get the map from the receiver.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rdi, rcx); // Untag the index.
+ __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ NearLabel check_heap_number;
+ __ JumpIfNotSmi(rax, &check_heap_number);
+ // No more branches to slow case on this path. Key and receiver not needed.
+ __ SmiToInteger32(rdx, rax);
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ cvtlsi2ss(xmm0, rdx);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+ __ j(not_equal, &slow);
+ // No more branches to slow case on this path.
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ // xmm0: value as double
+ if (array_type == kExternalFloatArray) {
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray: {
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvttsd2siq(rdx, xmm0);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
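The integer store path leans on cvttsd2siq mapping NaN and +/-Infinity to 0x8000000000000000, whose low 32 bits are zero. A hedged C++ equivalent of the truncation (the explicit guard is needed because out-of-range casts are undefined behavior in C++, while the hardware produces the sentinel for free):

    #include <cmath>
    uint32_t TruncateToUint32(double d) {
      if (std::isnan(d) || d < -9223372036854775808.0 ||
          d >= 9223372036854775808.0) {
        return 0;  // matches 0x8000000000000000 mod 2^32
      }
      return static_cast<uint32_t>(static_cast<int64_t>(d));
    }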
+
#undef __
} } // namespace v8::internal
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 3f7b1db..31f9527 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1119,23 +1119,30 @@
}
-Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+Result VirtualFrame::CallStoreIC(Handle<String> name,
+ bool is_contextual,
+ StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
Result value = Pop();
+ RelocInfo::Mode mode;
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(rax);
__ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
+ mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, rax, rdx);
+ mode = RelocInfo::CODE_TARGET;
}
__ Move(rcx, name);
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+ return RawCallCodeObject(ic, mode);
}
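The strict_mode parameter matters because ES5 strict assignment is observably different: stores that sloppy mode drops silently must throw. A one-line JS sketch (illustrative):

    // CompileRun("'use strict'; var o = Object.freeze({p: 1}); o.p = 2;");
    // // TypeError under kStrictMode; a silent no-op in sloppy mode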
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 0479ff0..4a9c720 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -338,7 +338,8 @@
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual);
+ Result CallStoreIC(Handle<String> name, bool is_contextual,
+ StrictModeFlag strict_mode);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index a7eca5b..a7422c2 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -29,11 +29,6 @@
test-api/Bug*: FAIL
-# The problem is that a code object can get a different optimizable flag
-# in crankshaft after creation.
-test-log/EquivalenceOfLoggingAndTraversal: SKIP
-
-
##############################################################################
# BUG(281): This test fails on some Linuxes.
test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
@@ -51,7 +46,6 @@
test-serialize/TestThatAlwaysFails: FAIL
test-serialize/DependentTestThatAlwaysFails: FAIL
-
##############################################################################
[ $arch == x64 ]
@@ -64,25 +58,10 @@
[ $arch == x64 && $crankshaft ]
# Tests that fail with crankshaft.
-test-deoptimization/DeoptimizeBinaryOperationADDString: FAIL
-test-deoptimization/DeoptimizeBinaryOperationADD: FAIL
-test-deoptimization/DeoptimizeBinaryOperationSUB: FAIL
-test-deoptimization/DeoptimizeBinaryOperationMUL: FAIL
test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL
-test-deoptimization/DeoptimizeBinaryOperationDIV: FAIL
test-deoptimization/DeoptimizeLoadICStoreIC: FAIL
test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL
-test-deoptimization/DeoptimizeCompare: FAIL
-
-# Tests that time out with crankshaft.
-test-api/Threading: SKIP
-
-# BUG(1049): Currently no deoptimization support.
-test-serialize/ContextSerialization: SKIP
-test-serialize/ContextDeserialization: SKIP
-test-debug/BreakPointReturn: SKIP
-test-debug/DebugStepLinearMixedICs: SKIP
-
+test-deoptimization/DeoptimizeCompare: PASS || FAIL
##############################################################################
[ $arch == arm ]
@@ -104,6 +83,11 @@
test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
test-sockets/Socket: SKIP
+# BUG(1075): Some deserialization tests fail on ARM
+test-serialize/Deserialize: SKIP
+test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
+test-serialize/DeserializeAndRunScript2: SKIP
+test-serialize/DeserializeFromSecondSerialization: SKIP
##############################################################################
[ $arch == arm && $crankshaft ]
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6a2f328..b92185f 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -874,6 +874,10 @@
TestExternalPointerWrapping();
#if defined(V8_HOST_ARCH_X64)
+ // Check a value with a leading 1 bit in x64 Smi encoding.
+ expected_ptr = reinterpret_cast<void*>(0x400000000);
+ TestExternalPointerWrapping();
+
expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
TestExternalPointerWrapping();
@@ -1645,6 +1649,23 @@
CHECK_NE(hash, hash3);
int hash4 = obj->GetIdentityHash();
CHECK_EQ(hash, hash4);
+
+ // Check identity hashes behaviour in the presence of JS accessors.
+ // Put a getter for 'v8::IdentityHash' on the Object's prototype:
+ {
+ CompileRun("Object.prototype['v8::IdentityHash'] = 42;\n");
+ Local<v8::Object> o1 = v8::Object::New();
+ Local<v8::Object> o2 = v8::Object::New();
+ CHECK_NE(o1->GetIdentityHash(), o2->GetIdentityHash());
+ }
+ {
+ CompileRun(
+ "function cnst() { return 42; };\n"
+ "Object.prototype.__defineGetter__('v8::IdentityHash', cnst);\n");
+ Local<v8::Object> o1 = v8::Object::New();
+ Local<v8::Object> o2 = v8::Object::New();
+ CHECK_NE(o1->GetIdentityHash(), o2->GetIdentityHash());
+ }
}
@@ -2365,18 +2386,50 @@
}
-// Test that overwritten toString methods are not invoked on uncaught
-// exception formatting. However, they are invoked when performing
-// normal error string conversions.
+static v8::Handle<Value> Fail(const v8::Arguments& args) {
+ ApiTestFuzzer::Fuzz();
+ CHECK(false);
+ return v8::Undefined();
+}
+
+
+// Test that overwritten methods are not invoked on uncaught exception
+// formatting. However, they are invoked when performing normal error
+// string conversions.
TEST(APIThrowMessageOverwrittenToString) {
v8::HandleScope scope;
v8::V8::AddMessageListener(check_reference_error_message);
- LocalContext context;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->Set(v8_str("fail"), v8::FunctionTemplate::New(Fail));
+ LocalContext context(NULL, templ);
+ CompileRun("asdf;");
+ CompileRun("var limit = {};"
+ "limit.valueOf = fail;"
+ "Error.stackTraceLimit = limit;");
+ CompileRun("asdf");
+ CompileRun("Array.prototype.pop = fail;");
+ CompileRun("Object.prototype.hasOwnProperty = fail;");
+ CompileRun("Object.prototype.toString = function f() { return 'Yikes'; }");
+ CompileRun("Number.prototype.toString = function f() { return 'Yikes'; }");
+ CompileRun("String.prototype.toString = function f() { return 'Yikes'; }");
CompileRun("ReferenceError.prototype.toString ="
" function() { return 'Whoops' }");
CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.constructor.name = void 0;");
+ CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.constructor = void 0;");
+ CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.__proto__ = new Object();");
+ CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype = new Object();");
+ CompileRun("asdf;");
v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
CHECK(string->Equals(v8_str("Whoops")));
+ CompileRun("ReferenceError.prototype.constructor = new Object();"
+ "ReferenceError.prototype.constructor.name = 1;"
+ "Number.prototype.toString = function() { return 'Whoops'; };"
+ "ReferenceError.prototype.toString = Object.prototype.toString;");
+ CompileRun("asdf;");
v8::V8::RemoveMessageListeners(check_message);
}
@@ -5273,11 +5326,13 @@
}
+static bool allowed_access_type[v8::ACCESS_KEYS + 1] = { false };
static bool NamedAccessBlocker(Local<v8::Object> global,
Local<Value> name,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global);
+ return Context::GetCurrent()->Global()->Equals(global) ||
+ allowed_access_type[type];
}
@@ -5285,7 +5340,8 @@
uint32_t key,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global);
+ return Context::GetCurrent()->Global()->Equals(global) ||
+ allowed_access_type[type];
}
@@ -5317,7 +5373,7 @@
}
-THREADED_TEST(AccessControl) {
+TEST(AccessControl) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
@@ -5343,6 +5399,27 @@
v8::Handle<v8::Object> global0 = context0->Global();
+ // Define a property with JS getter and setter.
+ CompileRun(
+ "function getter() { return 'getter'; };\n"
+ "function setter() { return 'setter'; }\n"
+ "Object.defineProperty(this, 'js_accessor_p', {get:getter, set:setter})");
+
+ Local<Value> getter = global0->Get(v8_str("getter"));
+ Local<Value> setter = global0->Get(v8_str("setter"));
+
+ // And define normal element.
+ global0->Set(239, v8_str("239"));
+
+ // Define an element with JS getter and setter.
+ CompileRun(
+ "function el_getter() { return 'el_getter'; };\n"
+ "function el_setter() { return 'el_setter'; };\n"
+ "Object.defineProperty(this, '42', {get: el_getter, set: el_setter});");
+
+ Local<Value> el_getter = global0->Get(v8_str("el_getter"));
+ Local<Value> el_setter = global0->Get(v8_str("el_setter"));
+
v8::HandleScope scope1;
v8::Persistent<Context> context1 = Context::New();
@@ -5351,40 +5428,187 @@
v8::Handle<v8::Object> global1 = context1->Global();
global1->Set(v8_str("other"), global0);
+ // Access blocked property.
+ CompileRun("other.blocked_prop = 1");
+
+ ExpectUndefined("other.blocked_prop");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
+ ExpectFalse("propertyIsEnumerable.call(other, 'blocked_prop')");
+
+ // Enable ACCESS_HAS
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other.blocked_prop");
+ // ... and now we can get the descriptor...
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'blocked_prop').value");
+ // ... and enumerate the property.
+ ExpectTrue("propertyIsEnumerable.call(other, 'blocked_prop')");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access blocked element.
+ CompileRun("other[239] = 1");
+
+ ExpectUndefined("other[239]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239')");
+ ExpectFalse("propertyIsEnumerable.call(other, '239')");
+
+ // Enable ACCESS_HAS
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other[239]");
+ // ... and now we can get the descriptor...
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239').value");
+ // ... and enumerate the property.
+ ExpectTrue("propertyIsEnumerable.call(other, '239')");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access a property with JS accessor.
+ CompileRun("other.js_accessor_p = 2");
+
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p')");
+
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_GET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+
+ ExpectString("other.js_accessor_p", "getter");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectUndefined("other.js_accessor_p");
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectString("other.js_accessor_p", "getter");
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
+ ExpectObject(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
+ ExpectUndefined(
+ "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Access an element with JS accessor.
+ CompileRun("other[42] = 2");
+
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42')");
+
+ // Enable ACCESS_HAS.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_GET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+
+ ExpectString("other[42]", "el_getter");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectUndefined("other[42]");
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
+ // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
+ allowed_access_type[v8::ACCESS_HAS] = true;
+ allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access_type[v8::ACCESS_SET] = true;
+
+ ExpectString("other[42]", "el_getter");
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
+ ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
+ ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
+
+ allowed_access_type[v8::ACCESS_SET] = false;
+ allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access_type[v8::ACCESS_HAS] = false;
+
v8::Handle<Value> value;
- // Access blocked property
- value = v8_compile("other.blocked_prop = 1")->Run();
- value = v8_compile("other.blocked_prop")->Run();
- CHECK(value->IsUndefined());
-
- value = v8_compile("propertyIsEnumerable.call(other, 'blocked_prop')")->Run();
- CHECK(value->IsFalse());
-
// Access accessible property
- value = v8_compile("other.accessible_prop = 3")->Run();
+ value = CompileRun("other.accessible_prop = 3");
CHECK(value->IsNumber());
CHECK_EQ(3, value->Int32Value());
CHECK_EQ(3, g_echo_value);
- value = v8_compile("other.accessible_prop")->Run();
+ value = CompileRun("other.accessible_prop");
CHECK(value->IsNumber());
CHECK_EQ(3, value->Int32Value());
- value =
- v8_compile("propertyIsEnumerable.call(other, 'accessible_prop')")->Run();
+ value = CompileRun(
+ "Object.getOwnPropertyDescriptor(other, 'accessible_prop').value");
+ CHECK(value->IsNumber());
+ CHECK_EQ(3, value->Int32Value());
+
+ value = CompileRun("propertyIsEnumerable.call(other, 'accessible_prop')");
CHECK(value->IsTrue());
// Enumeration doesn't enumerate accessors from inaccessible objects in
// the prototype chain even if the accessors are in themselves accessible.
- Local<Value> result =
+ value =
CompileRun("(function(){var obj = {'__proto__':other};"
"for (var p in obj)"
" if (p == 'accessible_prop' || p == 'blocked_prop') {"
" return false;"
" }"
"return true;})()");
- CHECK(result->IsTrue());
+ CHECK(value->IsTrue());
context1->Exit();
context0->Exit();
@@ -6229,7 +6453,7 @@
" var str = String(e);"
" if (str.indexOf('TypeError') == -1) return 1;"
" if (str.indexOf('[object Fun]') != -1) return 2;"
- " if (str.indexOf('#<a Fun>') == -1) return 3;"
+ " if (str.indexOf('#<Fun>') == -1) return 3;"
" return 0;"
" }"
" return 4;"
@@ -7318,6 +7542,61 @@
"garbage = undefined;");
}
+v8::Handle<v8::Value> DirectApiCallback(const v8::Arguments& args) {
+ static int count = 0;
+ if (count++ % 3 == 0) {
+ v8::V8::LowMemoryNotification(); // This should move the stub
+ GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
+ }
+ return v8::Handle<v8::Value>();
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+ nativeobject_templ->Set("callback",
+ v8::FunctionTemplate::New(DirectApiCallback));
+ v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+ context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ // Call the API function multiple times to ensure direct-call stub creation.
+ CompileRun(
+ "function f() {"
+ " for (var i = 1; i <= 30; i++) {"
+ " nativeobject.callback();"
+ " }"
+ "}"
+ "f();");
+}
+
+
+v8::Handle<v8::Value> ThrowingDirectApiCallback(const v8::Arguments& args) {
+ return v8::ThrowException(v8_str("g"));
+}
+
+
+THREADED_TEST(CallICFastApi_DirectCall_Throw) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::ObjectTemplate> nativeobject_templ = v8::ObjectTemplate::New();
+ nativeobject_templ->Set("callback",
+ v8::FunctionTemplate::New(ThrowingDirectApiCallback));
+ v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
+ context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ // Call the API function multiple times to ensure direct-call stub creation.
+ v8::Handle<Value> result = CompileRun(
+ "var result = '';"
+ "function f() {"
+ " for (var i = 1; i <= 5; i++) {"
+ " try { nativeobject.callback(); } catch (e) { result += e; }"
+ " }"
+ "}"
+ "f(); result;");
+ CHECK_EQ(v8_str("ggggg"), result);
+}
+
+
THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
v8::HandleScope scope;
@@ -10325,6 +10604,108 @@
"i");
CHECK_EQ(255, result->Int32Value());
+ // Make sure that pixel array ICs recognize when a non-pixel array
+ // is passed to them.
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(just_ints);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
+ // Make sure that pixel array ICs recognize out-of-bounds accesses.
+ result = CompileRun("function pa_load(p, start) {"
+ " var sum = 0;"
+ " for (var j = start; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels,-10);"
+ "}"
+ "result");
+ CHECK_EQ(0, result->Int32Value());
+
+ // Make sure that generic ICs properly handle a pixel array.
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
+ // Make sure that generic load ICs recognize out-of-bounds accesses in
+ // pixel arrays.
+ result = CompileRun("function pa_load(p, start) {"
+ " var sum = 0;"
+ " for (var j = start; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints,0); }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels,0); }"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(pixels,-10);"
+ "}"
+ "result");
+ CHECK_EQ(0, result->Int32Value());
+
+ // Make sure that generic ICs properly handle types other than pixel
+ // arrays (that the inlined fast pixel array test leaves the right information
+ // in the right registers).
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var j = 0; j < 256; j++) { sum += p[j]; }"
+ " return sum;"
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "just_ints = new Object();"
+ "for (var i = 0; i < 256; ++i) { just_ints[i] = i; }"
+ "for (var i = 0; i < 10; ++i) { pa_load(just_ints); }"
+ "for (var i = 0; i < 10; ++i) { pa_load(pixels); }"
+ "sparse_array = new Object();"
+ "for (var i = 0; i < 256; ++i) { sparse_array[i] = i; }"
+ "sparse_array[1000000] = 3;"
+ "for (var i = 0; i < 10; ++i) {"
+ " result = pa_load(sparse_array);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
+ // Make sure that pixel array loads are optimized by crankshaft.
+ result = CompileRun("function pa_load(p) {"
+ " var sum = 0;"
+ " for (var i=0; i<256; ++i) {"
+ " sum += p[i];"
+ " }"
+ " return sum; "
+ "}"
+ "for (var i = 0; i < 256; ++i) { pixels[i] = i; }"
+ "for (var i = 0; i < 10000; ++i) {"
+ " result = pa_load(pixels);"
+ "}"
+ "result");
+ CHECK_EQ(32640, result->Int32Value());
+
free(pixel_data);
}
@@ -10583,6 +10964,33 @@
CHECK_EQ(0, result->Int32Value());
CHECK_EQ(0,
i::Smi::cast(jsobj->GetElement(5)->ToObjectChecked())->value());
+
+ // Check truncation behavior of integral arrays.
+ const char* unsigned_data =
+ "var source_data = [0.6, 10.6];"
+ "var expected_results = [0, 10];";
+ const char* signed_data =
+ "var source_data = [0.6, 10.6, -0.6, -10.6];"
+ "var expected_results = [0, 10, 0, -10];";
+ bool is_unsigned =
+ (array_type == v8::kExternalUnsignedByteArray ||
+ array_type == v8::kExternalUnsignedShortArray ||
+ array_type == v8::kExternalUnsignedIntArray);
+
+ i::OS::SNPrintF(test_buf,
+ "%s"
+ "var all_passed = true;"
+ "for (var i = 0; i < source_data.length; i++) {"
+ " for (var j = 0; j < 8; j++) {"
+ " ext_array[j] = source_data[i];"
+ " }"
+ " all_passed = all_passed &&"
+ " (ext_array[5] == expected_results[i]);"
+ "}"
+ "all_passed;",
+ (is_unsigned ? unsigned_data : signed_data));
+ result = CompileRun(test_buf.start());
+ CHECK_EQ(true, result->BooleanValue());
}
result = CompileRun("ext_array[3] = 33;"
@@ -10977,6 +11385,26 @@
}
+TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(true,
+ 1024,
+ v8::StackTrace::kDetailed);
+
+ CompileRun(
+ "var setters = ['column', 'lineNumber', 'scriptName',\n"
+ " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
+ " 'isConstructor'];\n"
+ "for (var i = 0; i < setters.length; i++) {\n"
+ " var prop = setters[i];\n"
+ " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
+ "}\n");
+ CompileRun("throw 'exception';");
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+}
+
+
v8::Handle<Value> AnalyzeStackOfEvalWithSourceURL(const v8::Arguments& args) {
v8::HandleScope scope;
v8::Handle<v8::StackTrace> stackTrace =
@@ -12147,6 +12575,25 @@
}
+THREADED_TEST(Equals) {
+ v8::HandleScope handleScope;
+ LocalContext localContext;
+
+ v8::Handle<v8::Object> globalProxy = localContext->Global();
+ v8::Handle<Value> global = globalProxy->GetPrototype();
+
+ CHECK(global->StrictEquals(global));
+ CHECK(!global->StrictEquals(globalProxy));
+ CHECK(!globalProxy->StrictEquals(global));
+ CHECK(globalProxy->StrictEquals(globalProxy));
+
+ CHECK(global->Equals(global));
+ CHECK(!global->Equals(globalProxy));
+ CHECK(!globalProxy->Equals(global));
+ CHECK(globalProxy->Equals(globalProxy));
+}
+
+
static v8::Handle<v8::Value> Getter(v8::Local<v8::String> property,
const v8::AccessorInfo& info ) {
return v8_str("42!");
@@ -12173,3 +12620,19 @@
CHECK_EQ(1, result->Length());
CHECK_EQ(v8_str("universalAnswer"), result->Get(0));
}
+
+
+TEST(DefinePropertyPostDetach) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Handle<v8::Object> proxy = context->Global();
+ v8::Handle<v8::Function> define_property =
+ CompileRun("(function() {"
+ " Object.defineProperty("
+ " this,"
+ " 1,"
+ " { configurable: true, enumerable: true, value: 3 });"
+ "})").As<Function>();
+ context->DetachGlobal();
+ define_property->Call(proxy, 0, NULL);
+}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 0f12f98..43cf580 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -45,11 +45,7 @@
static v8::Persistent<v8::Context> env;
-// The test framework does not accept flags on the command line, so we set them
static void InitializeVM() {
- // enable generation of comments
- FLAG_debug_code = true;
-
if (env.IsEmpty()) {
env = v8::Context::New();
}
@@ -233,6 +229,8 @@
double d;
double e;
double f;
+ double g;
+ double h;
int i;
float x;
float y;
@@ -290,6 +288,15 @@
__ vmov(s31, lr);
__ vcvt_f64_s32(d4, s31);
__ vstr(d4, r4, OFFSET_OF(T, f));
+
+ // Test vabs.
+ __ vldr(d1, r4, OFFSET_OF(T, g));
+ __ vabs(d0, d1);
+ __ vstr(d0, r4, OFFSET_OF(T, g));
+ __ vldr(d2, r4, OFFSET_OF(T, h));
+ __ vabs(d0, d2);
+ __ vstr(d0, r4, OFFSET_OF(T, h));
+
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
@@ -309,6 +316,8 @@
t.d = 0.0;
t.e = 0.0;
t.f = 0.0;
+ t.g = -2718.2818;
+ t.h = 31415926.5;
t.i = 0;
t.x = 4.5;
t.y = 9.0;
@@ -317,6 +326,8 @@
CHECK_EQ(4.5, t.y);
CHECK_EQ(9.0, t.x);
CHECK_EQ(2, t.i);
+ CHECK_EQ(2718.2818, t.g);
+ CHECK_EQ(31415926.5, t.h);
CHECK_EQ(42.0, t.f);
CHECK_EQ(1.0, t.e);
CHECK_EQ(1.000000059604644775390625, t.d);
@@ -398,71 +409,189 @@
}
-static void TestRoundingMode(int32_t mode, double value, int expected) {
+enum VCVTTypes {
+ s32_f64,
+ u32_f64
+};
+
+static void TestRoundingMode(VCVTTypes types,
+ VFPRoundingMode mode,
+ double value,
+ int expected,
+ bool expected_exception = false) {
InitializeVM();
v8::HandleScope scope;
Assembler assm(NULL, 0);
- __ vmrs(r1);
- // Set custom FPSCR.
- __ bic(r2, r1, Operand(((mode ^ 3) << 22) | 0xf));
- __ orr(r2, r2, Operand(mode << 22));
- __ vmsr(r2);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
- // Load value, convert, and move back result to r0.
- __ vmov(d1, value);
- __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
- __ vmov(r0, s0);
+ Label wrong_exception;
- __ mov(pc, Operand(lr));
+ __ vmrs(r1);
+ // Set custom FPSCR.
+ __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
+ __ orr(r2, r2, Operand(mode));
+ __ vmsr(r2);
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = Heap::CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
- CHECK(code->IsCode());
+ // Load value, convert, and move back result to r0 if everything went well.
+ __ vmov(d1, value);
+ switch (types) {
+ case s32_f64:
+ __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ case u32_f64:
+ __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Check for VFP exceptions.
+ __ vmrs(r2);
+ __ tst(r2, Operand(kVFPExceptionMask));
+ // Check that we behaved as expected.
+ __ b(&wrong_exception,
+ expected_exception ? eq : ne);
+ // There was no exception. Retrieve the result and return.
+ __ vmov(r0, s0);
+ __ mov(pc, Operand(lr));
+
+ // The exception behaviour is not what we expected.
+ // Load a special value and return.
+ __ bind(&wrong_exception);
+ __ mov(r0, Operand(11223344));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()))->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
- ::printf("res = %d\n", res);
- CHECK_EQ(expected, res);
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ ::printf("res = %d\n", res);
+ CHECK_EQ(expected, res);
+ }
}
TEST(7) {
// Test vfp rounding modes.
- // See ARM DDI 0406B Page A2-29.
- enum FPSCRRoungingMode {
- RN, // Round to Nearest.
- RP, // Round towards Plus Infinity.
- RM, // Round towards Minus Infinity.
- RZ // Round towards zero.
- };
+ // s32_f64 (double to integer).
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ TestRoundingMode(s32_f64, RN, 0, 0);
+ TestRoundingMode(s32_f64, RN, 0.5, 0);
+ TestRoundingMode(s32_f64, RN, -0.5, 0);
+ TestRoundingMode(s32_f64, RN, 1.5, 2);
+ TestRoundingMode(s32_f64, RN, -1.5, -2);
+ TestRoundingMode(s32_f64, RN, 123.7, 124);
+ TestRoundingMode(s32_f64, RN, -123.7, -124);
+ TestRoundingMode(s32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RN, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
+ TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
+ TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);
- TestRoundingMode(RZ, 0.5, 0);
- TestRoundingMode(RZ, -0.5, 0);
- TestRoundingMode(RZ, 123.7, 123);
- TestRoundingMode(RZ, -123.7, -123);
- TestRoundingMode(RZ, 123456.2, 123456);
- TestRoundingMode(RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RM, 0, 0);
+ TestRoundingMode(s32_f64, RM, 0.5, 0);
+ TestRoundingMode(s32_f64, RM, -0.5, -1);
+ TestRoundingMode(s32_f64, RM, 123.7, 123);
+ TestRoundingMode(s32_f64, RM, -123.7, -124);
+ TestRoundingMode(s32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RM, -123456.2, -123457);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
+ TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);
- TestRoundingMode(RM, 0.5, 0);
- TestRoundingMode(RM, -0.5, -1);
- TestRoundingMode(RM, 123.7, 123);
- TestRoundingMode(RM, -123.7, -124);
- TestRoundingMode(RM, 123456.2, 123456);
- TestRoundingMode(RM, -123456.2, -123457);
- }
+ TestRoundingMode(s32_f64, RZ, 0, 0);
+ TestRoundingMode(s32_f64, RZ, 0.5, 0);
+ TestRoundingMode(s32_f64, RZ, -0.5, 0);
+ TestRoundingMode(s32_f64, RZ, 123.7, 123);
+ TestRoundingMode(s32_f64, RZ, -123.7, -123);
+ TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
+ TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
+ TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);
+
+
+ // u32_f64 (double to integer).
+
+ // Negative values.
+ TestRoundingMode(u32_f64, RN, -0.5, 0);
+ TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RM, -0.5, 0, true);
+ TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);
+
+ TestRoundingMode(u32_f64, RZ, -0.5, 0);
+ TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
+ TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);
+
+ // Positive values.
+ // kMaxInt is the maximum *signed* integer: 0x7fffffff.
+ static const uint32_t kMaxUInt = 0xffffffffu;
+ TestRoundingMode(u32_f64, RZ, 0, 0);
+ TestRoundingMode(u32_f64, RZ, 0.5, 0);
+ TestRoundingMode(u32_f64, RZ, 123.7, 123);
+ TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RM, 0, 0);
+ TestRoundingMode(u32_f64, RM, 0.5, 0);
+ TestRoundingMode(u32_f64, RM, 123.7, 123);
+ TestRoundingMode(u32_f64, RM, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
+ TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
+ TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
+
+ TestRoundingMode(u32_f64, RN, 0, 0);
+ TestRoundingMode(u32_f64, RN, 0.5, 0);
+ TestRoundingMode(u32_f64, RN, 1.5, 2);
+ TestRoundingMode(u32_f64, RN, 123.7, 124);
+ TestRoundingMode(u32_f64, RN, 123456.2, 123456);
+ TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
+ TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
+ static_cast<uint32_t>(kMaxInt) + 1);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
+ TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}
#undef __
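For readers cross-checking the expectation tables above: RN rounds to nearest with ties to even, RM rounds toward minus infinity, and RZ truncates toward zero; out-of-range inputs saturate at kMinInt/kMaxInt, and the trailing 'true' argument appears to mark the cases where the invalid-operation flag is expected. A rough JavaScript model of the signed conversion, given purely as an illustration (the helper names are made up and not part of this commit):

    // Round to nearest, ties to even (the VFP "RN" mode).
    function roundTiesToEven(x) {
      var lo = Math.floor(x);
      var frac = x - lo;
      if (frac < 0.5) return lo;
      if (frac > 0.5) return lo + 1;
      return (lo % 2 === 0) ? lo : lo + 1;  // exact tie: pick the even neighbor
    }

    // Sketch of the s32_f64 conversion: double to saturated signed 32-bit int.
    function modelS32F64(x, mode) {
      var kMaxInt = 2147483647, kMinInt = -2147483648;
      var r = mode === 'RN' ? roundTiesToEven(x)
            : mode === 'RM' ? Math.floor(x)
            : (x < 0 ? Math.ceil(x) : Math.floor(x));  // 'RZ': truncate
      if (r > kMaxInt) return { value: kMaxInt, invalid: true };  // saturate high
      if (r < kMinInt) return { value: kMinInt, invalid: true };  // saturate low
      return { value: r, invalid: false };
    }

For example, modelS32F64(2147483647.5, 'RN') rounds up to 2147483648 (the tie goes to the even neighbor), saturates back to kMaxInt and reports invalid, matching TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true) above.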
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index 955562b..ecb42e2 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -47,14 +47,10 @@
static v8::Persistent<v8::Context> env;
-// The test framework does not accept flags on the command line, so we set them.
static void InitializeVM() {
// Disable compilation of natives.
FLAG_disable_native_files = true;
- // Enable generation of comments.
- FLAG_debug_code = true;
-
if (env.IsEmpty()) {
env = v8::Context::New();
}
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index f100b73..5d292df 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -48,6 +48,12 @@
using v8::internal::rdx;
using v8::internal::rbp;
using v8::internal::rsp;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::r12;
+using v8::internal::r13;
+using v8::internal::times_1;
+
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
using v8::internal::less_equal;
@@ -289,4 +295,47 @@
CHECK_EQ(1, result);
}
+
+TEST(OperandRegisterDependency) {
+ int offsets[4] = {0, 1, 0xfed, 0xbeefcad};
+ for (int i = 0; i < 4; i++) {
+ int offset = offsets[i];
+ CHECK(Operand(rax, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rax, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, offset).AddressUsesRegister(rcx));
+
+ CHECK(Operand(rax, rax, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rax, rax, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, rax, times_1, offset).AddressUsesRegister(rcx));
+
+ CHECK(Operand(rax, rcx, times_1, offset).AddressUsesRegister(rax));
+ CHECK(Operand(rax, rcx, times_1, offset).AddressUsesRegister(rcx));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(r9));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(rdx));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(rsp));
+
+ CHECK(Operand(rsp, offset).AddressUsesRegister(rsp));
+ CHECK(!Operand(rsp, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rsp, offset).AddressUsesRegister(r12));
+
+ CHECK(Operand(rbp, offset).AddressUsesRegister(rbp));
+ CHECK(!Operand(rbp, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rbp, offset).AddressUsesRegister(r13));
+
+ CHECK(Operand(rbp, rax, times_1, offset).AddressUsesRegister(rbp));
+ CHECK(Operand(rbp, rax, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(rcx));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(r13));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(rsp));
+
+ CHECK(Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rsp));
+ CHECK(Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rbp));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(r12));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(r13));
+ }
+}
+
#undef __
diff --git a/test/cctest/test-bignum-dtoa.cc b/test/cctest/test-bignum-dtoa.cc
index 51a9057..a696ed8 100644
--- a/test/cctest/test-bignum-dtoa.cc
+++ b/test/cctest/test-bignum-dtoa.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,7 +44,7 @@
// Removes trailing '0' digits.
// Can return the empty string if all digits are 0.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index b9f6038..441aae6 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -997,7 +997,7 @@
CheckDebugBreakFunction(&env,
"function f2(){x=1;}", "f2",
0,
- v8::internal::RelocInfo::CODE_TARGET,
+ v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
Builtins::builtin(Builtins::StoreIC_DebugBreak));
CheckDebugBreakFunction(&env,
"function f3(){var a=x;}", "f3",
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index c375831..dea0db9 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -435,6 +435,11 @@
COMPARE(vmov(s31, r10),
"ee0faa90 vmov s31, r10");
+ COMPARE(vabs(d0, d1),
+ "eeb00bc1 vabs d0, d1");
+ COMPARE(vabs(d3, d4, mi),
+ "4eb03bc4 vabsmi d3, d4");
+
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
COMPARE(vadd(d3, d4, d5, mi),
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 30d708e..c995aa8 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -446,6 +446,14 @@
}
}
+ {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
+ __ pextrd(Operand(eax), xmm0, 1);
+ __ pinsrd(xmm1, Operand(eax), 0);
+ }
+ }
+
__ ret(0);
CodeDesc desc;
diff --git a/test/cctest/test-dtoa.cc b/test/cctest/test-dtoa.cc
index ff0b660..66c2aaf 100644
--- a/test/cctest/test-dtoa.cc
+++ b/test/cctest/test-dtoa.cc
@@ -44,7 +44,7 @@
// Removes trailing '0' digits.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/test/cctest/test-fast-dtoa.cc b/test/cctest/test-fast-dtoa.cc
index 30d2476..d311713 100644
--- a/test/cctest/test-fast-dtoa.cc
+++ b/test/cctest/test-fast-dtoa.cc
@@ -19,7 +19,7 @@
// Removes trailing '0' digits.
static void TrimRepresentation(Vector<char> representation) {
- int len = strlen(representation.start());
+ int len = StrLength(representation.start());
int i;
for (i = len - 1; i >= 0; --i) {
if (representation[i] != '0') break;
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 503e0cf..032a183 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -1074,6 +1074,21 @@
return true;
}
}
+ // Code objects can change their optimizability: a code object may start
+ // out optimizable but later be discovered not to be. We don't currently
+ // record this transition, so we allow cases where the reference log
+ // considers an object optimizable while the traversal finds it
+ // non-optimizable.
+ if (ref_s[1] == '~') { // Code object used to be optimizable
+ if (new_s[1] == ' ') { // ...but later was set unoptimizable.
+ CHECK_EQ('"', ref_s[0]);
+ CHECK_EQ('"', new_s[0]);
+ ref_s += 2; // Cut the leading quote and the marker
+ ref_len -= 2;
+ new_s += 1; // Cut the leading quote only.
+ new_len -= 1;
+ }
+ }
return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
}
diff --git a/test/cctest/test-strtod.cc b/test/cctest/test-strtod.cc
index f5547db..da6b07b 100644
--- a/test/cctest/test-strtod.cc
+++ b/test/cctest/test-strtod.cc
@@ -324,6 +324,25 @@
StrtodChar("5708990770823839207320493820740630171355185151999", -3));
CHECK_EQ(5708990770823839524233143877797980545530986496.0,
StrtodChar("5708990770823839207320493820740630171355185152001", -3));
+
+ // The following test cases got some public attention in early 2011 when they
+ // sent Java and PHP into an infinite loop.
+ CHECK_EQ(2.225073858507201e-308, StrtodChar("22250738585072011", -324));
+ CHECK_EQ(2.22507385850720138309e-308,
+ StrtodChar("22250738585072011360574097967091319759348195463516456480"
+ "23426109724822222021076945516529523908135087914149158913"
+ "03962110687008643869459464552765720740782062174337998814"
+ "10632673292535522868813721490129811224514518898490572223"
+ "07285255133155755015914397476397983411801999323962548289"
+ "01710708185069063066665599493827577257201576306269066333"
+ "26475653000092458883164330377797918696120494973903778297"
+ "04905051080609940730262937128958950003583799967207254304"
+ "36028407889577179615094551674824347103070260914462157228"
+ "98802581825451803257070188608721131280795122334262883686"
+ "22321503775666622503982534335974568884423900265498198385"
+ "48794829220689472168983109969836584681402285424333066033"
+ "98508864458040010349339704275671864433837704860378616227"
+ "71738545623065874679014086723327636718751", -1076));
}
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index a51bd30..39754ca 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -41,17 +41,9 @@
# We are compatible with Safari and Firefox.
chapter11/11.1/11.1.5: UNIMPLEMENTED
-# Delete returns true in eval even when it should return false.
-# Please see http://code.google.com/p/v8/issues/detail?id=759
-chapter11/11.4/11.4.1//11.4.1-4.a-5: FAIL
-chapter11/11.4/11.4.1//11.4.1-4.a-7: FAIL
-
# We do not have a global object called 'global' as required by tests.
chapter15/15.1: FAIL_OK
-# NOT IMPLEMENTED: bind
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED
-
# NaN is writable. We are compatible with JSC.
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
# Infinity is writable. We are compatible with JSC.
@@ -99,7 +91,7 @@
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-3: FAIL_OK
-# SUBSETFAIL + we do not implement Function.prototype.bind.
+# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-4: FAIL_OK
# SUBSETFAIL
@@ -180,9 +172,6 @@
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
-# NOT IMPLEMENTED: bind on Function.prototype.
-chapter15/15.3/15.3.4/15.3.4.5/15.3.4.5-0-1: UNIMPLEMENTED
-
# Bad test - the spec does not say anything about throwing errors
# on calling Array.prototype.indexOf with undefined as argument.
chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-1-1: FAIL_OK
@@ -240,6 +229,263 @@
chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
+##############################################################################
+# Unimplemented parts of strict mode
+# Expectations are set to FAIL only so that these tests trigger as soon as
+# the corresponding strict mode feature gets implemented.
+
+# A directive preceding a 'use strict' directive may not contain
+# an OctalEscapeSequence.
+# Incorrect test - need double escape in eval.
+chapter07/7.8/7.8.4/7.8.4-1-s: FAIL
+
+# this is not coerced to an object in strict mode (Number)
+chapter10/10.4/10.4.3/10.4.3-1-1-s: FAIL
+# this is not coerced to an object in strict mode (string)
+chapter10/10.4/10.4.3/10.4.3-1-2-s: FAIL
+# this is not coerced to an object in strict mode (undefined)
+chapter10/10.4/10.4.3/10.4.3-1-3-s: FAIL
+# this is not coerced to an object in strict mode (boolean)
+chapter10/10.4/10.4.3/10.4.3-1-4-s: FAIL
+
+# arguments[i] remains same after changing actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-1-s: FAIL
+# arguments[i] doesn't map to actual parameters in strict mode
+chapter10/10.6/10.6-10-c-ii-2-s: FAIL
+
+# Accessing caller property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-b-1-s: FAIL
+# arguments.caller exists in strict mode
+chapter10/10.6/10.6-13-b-2-s: FAIL
+# arguments.caller is non-configurable in strict mode
+chapter10/10.6/10.6-13-b-3-s: FAIL
+# Accessing callee property of Arguments object throws TypeError in strict mode
+chapter10/10.6/10.6-13-c-1-s: FAIL
+# arguments.callee is non-configurable in strict mode
+chapter10/10.6/10.6-13-c-3-s: FAIL
+
+# simple assignment throws TypeError if LeftHandSide is a property reference
+# with a primitive base value (this is undefined)
+chapter11/11.13/11.13.1/11.13.1-1-7-s: FAIL
+
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-2-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.Infinity)
+chapter11/11.13/11.13.1/11.13.1-4-3-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.length)
+chapter11/11.13/11.13.1/11.13.1-4-4-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Object.length)
+chapter11/11.13/11.13.1/11.13.1-4-5-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Function.length)
+chapter11/11.13/11.13.1/11.13.1-4-6-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Array.length)
+chapter11/11.13/11.13.1/11.13.1-4-7-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (String.length)
+chapter11/11.13/11.13.1/11.13.1-4-8-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Boolean.length)
+chapter11/11.13/11.13.1/11.13.1-4-9-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.length)
+chapter11/11.13/11.13.1/11.13.1-4-10-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Date.length)
+chapter11/11.13/11.13.1/11.13.1-4-11-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (RegExp.length)
+chapter11/11.13/11.13.1/11.13.1-4-12-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Error.length)
+chapter11/11.13/11.13.1/11.13.1-4-13-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.MAX_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-14-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.MIN_VALUE)
+chapter11/11.13/11.13.1/11.13.1-4-15-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.NaN)
+chapter11/11.13/11.13.1/11.13.1-4-16-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.NEGATIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-17-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Number.POSITIVE_INFINITY)
+chapter11/11.13/11.13.1/11.13.1-4-18-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.E)
+chapter11/11.13/11.13.1/11.13.1-4-19-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.LN10)
+chapter11/11.13/11.13.1/11.13.1-4-20-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.LN2)
+chapter11/11.13/11.13.1/11.13.1-4-21-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.LOG2E)
+chapter11/11.13/11.13.1/11.13.1-4-22-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.LOG10E)
+chapter11/11.13/11.13.1/11.13.1-4-23-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.PI)
+chapter11/11.13/11.13.1/11.13.1-4-24-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.SQRT1_2)
+chapter11/11.13/11.13.1/11.13.1-4-25-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Math.SQRT2)
+chapter11/11.13/11.13.1/11.13.1-4-26-s: FAIL
+# simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.undefined)
+chapter11/11.13/11.13.1/11.13.1-4-27-s: FAIL
+
+# delete operator throws TypeError when deleting a non-configurable data
+# property in strict mode
+chapter11/11.4/11.4.1/11.4.1-4.a-3-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable
+# data property in strict mode (Global.NaN)
+chapter11/11.4/11.4.1/11.4.1-4.a-4-s: FAIL
+# delete operator throws TypeError when deleting a non-configurable data
+# property in strict mode (Math.LN2)
+chapter11/11.4/11.4.1/11.4.1-4.a-9-s: FAIL
+
+# delete operator throws ReferenceError when deleting a direct reference
+# to a var in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-1-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference
+# to a function argument in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-2-s: FAIL
+# delete operator throws ReferenceError when deleting a direct reference
+# to a function name in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-3-s: FAIL
+# delete operator throws SyntaxError when deleting a direct reference
+# to a function argument (object) in strict mode
+chapter11/11.4/11.4.1/11.4.1-5-4-s: FAIL
+
+# eval - a function declaring a var named 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-1-s: FAIL
+# eval - a function assigning into 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-2-s: FAIL
+# eval - a function expr declaring a var named 'eval' throws EvalError
+# in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-3-s: FAIL
+# eval - a function expr assigning into 'eval' throws a EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-4-s: FAIL
+# eval - a Function declaring var named 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-5-s: FAIL
+# eval - a Function assigning into 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-6-s: FAIL
+# eval - a direct eval declaring a var named 'eval' throws EvalError
+# in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-7-s: FAIL
+# eval - a direct eval assigning into 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-8-s: FAIL
+# eval - an indirect eval declaring a var named 'eval' throws EvalError
+# in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-9-s: FAIL
+# eval - an indirect eval assigning into 'eval' throws EvalError in strict mode
+# Invalid test case. SyntaxError should be expected instead of EvalError.
+chapter12/12.2/12.2.1/12.2.1-10-s: FAIL
+
+# SyntaxError if eval used as function identifier in function declaration
+# with strict body
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-3-s: FAIL
+# SyntaxError if eval used as function identifier in function expression
+# with strict body
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-4-s: FAIL
+# SyntaxError if eval used as function identifier in function declaration
+# in strict code
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-5-s: FAIL
+# SyntaxError if eval used as function identifier in function expression
+# in strict code
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-6-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration
+# with strict body
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-9-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression
+# with strict body
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-10-s: FAIL
+# SyntaxError if arguments used as function identifier in function declaration
+# in strict code
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-11-s: FAIL
+# SyntaxError if arguments used as function identifier in function expression
+# in strict code
+# Test fails to return true on success (invalid test case).
+chapter13/13.1/13.1-3-12-s: FAIL
+
+# 'use strict' directive - correct usage
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-1-s: FAIL
+# "use strict" directive - correct usage double quotes
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-2-s: FAIL
+# 'use strict' directive - may follow other directives
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-8-s: FAIL
+# 'use strict' directive - may occur multiple times
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-9-s: FAIL
+# other directives - may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-10-s: FAIL
+# comments may precede 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-11-s: FAIL
+# comments may follow 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-12-s: FAIL
+# semicolon insertion works for 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-13-s: FAIL
+# semicolon insertion may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-14-s: FAIL
+# blank lines may come before 'use strict' directive
+# depends on "this is not coerced to an object in strict mode (undefined)"
+chapter14/14.1/14.1-15-s: FAIL
+
+# Duplicate combined parameter name allowed in Function constructor called
+# in strict mode if body not strict
+# Test fails to return true on success (invalid test case).
+chapter15/15.3/15.3.2/15.3.2.1/15.3.2.1-11-6-s: FAIL
+
+# Array.prototype.every - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1-s: FAIL
+# Array.prototype.some - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1-s: FAIL
+# Array.prototype.forEach - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1-s: FAIL
+# Array.prototype.map - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1-s: FAIL
+# Array.prototype.filter - thisArg not passed to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.20/15.4.4.20-5-1-s: FAIL
+# Array.prototype.reduce - null passed as thisValue to strict callbackfn
+chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL
+
[ $arch == mips ]
# Skip all tests on MIPS.
diff --git a/test/mjsunit/array-splice.js b/test/mjsunit/array-splice.js
index 68dd9b2..0e307b5 100644
--- a/test/mjsunit/array-splice.js
+++ b/test/mjsunit/array-splice.js
@@ -339,6 +339,20 @@
})();
+// Check the case of JS builtin .splice()
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var array = [1, 2, 3, 4];
+ Array.prototype[3] = 'foo'; // To force JS builtin.
+
+ var spliced = array.splice();
+
+ assertEquals([], spliced);
+ assertEquals([1, 2, 3, 4], array);
+ }
+})();
+
+
// Check the behaviour when approaching maximal values for length.
(function() {
for (var i = 0; i < 7; i++) {
diff --git a/test/mjsunit/compiler/literals.js b/test/mjsunit/compiler/literals.js
index d846cf5..e910bb3 100644
--- a/test/mjsunit/compiler/literals.js
+++ b/test/mjsunit/compiler/literals.js
@@ -88,3 +88,6 @@
assertEquals(19, eval('var a=1, b=2; [a,b,3,4]; 19'));
assertEquals(23, eval('var a=1, b=2; c=23; [a,b,3,4]; c'));
+// Test that literals work for non-smi indices.
+// Ensure a hash-map collision if the value is used as the hash.
+var o = {"2345678901" : 42, "2345678901" : 30};
diff --git a/test/mjsunit/compiler/regress-1085.js b/test/mjsunit/compiler/regress-1085.js
new file mode 100644
index 0000000..5d787a4
--- /dev/null
+++ b/test/mjsunit/compiler/regress-1085.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test correct checks for negative zero.
+// This test relies on specific type feedback for Math.min.
+function f(x) { return 1 / Math.min(1, x); }
+
+for (var i=0; i<1000000; i++) f(1);
+
+assertEquals(-Infinity, f(-0));
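Why the test divides by the result: -0 and +0 compare equal under ===, so the sign of zero is observable only through an operation such as division. Math.min(1, -0) is -0 and 1 / -0 is -Infinity; an optimizer that drops the sign of zero (here, after collecting smi-only type feedback for Math.min) would return +Infinity instead. The idiom spelled out, as an illustration only:

    assertTrue(-0 === +0);                         // equality cannot tell them apart
    assertEquals(-Infinity, 1 / Math.min(1, -0));  // -0 survives Math.min
    assertEquals(+Infinity, 1 / Math.min(1, +0));  // +0 for contrast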
diff --git a/test/mjsunit/compiler/regress-arguments.js b/test/mjsunit/compiler/regress-arguments.js
index 234d3fb..ebae5a0 100644
--- a/test/mjsunit/compiler/regress-arguments.js
+++ b/test/mjsunit/compiler/regress-arguments.js
@@ -46,4 +46,7 @@
return f.apply(v, arguments);
}
-for (var i=0; i<1000000; i++) assertEquals(void 0, u());
+Number.prototype.foo = 42;
+delete Number.prototype.foo;
+
+for (var i=0; i<100000; i++) assertEquals(void 0, u());
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/cyclic-error-to-string.js
new file mode 100644
index 0000000..2502b53
--- /dev/null
+++ b/test/mjsunit/cyclic-error-to-string.js
@@ -0,0 +1,46 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test printing of cyclic errors, where the cyclic parts are rendered as
+// the empty string for compatibility with Safari and Firefox.
+
+var e = new Error();
+assertEquals('Error', e + '');
+
+e = new Error();
+e.name = e;
+e.message = e;
+e.stack = e;
+e.arguments = e;
+assertEquals(': ', e + '');
+
+e = new Error();
+e.name = [ e ];
+e.message = [ e ];
+e.stack = [ e ];
+e.arguments = [ e ];
+assertEquals(': ', e + '');
diff --git a/test/mjsunit/debug-backtrace-text.js b/test/mjsunit/debug-backtrace-text.js
index 67c6746..61648fa 100644
--- a/test/mjsunit/debug-backtrace-text.js
+++ b/test/mjsunit/debug-backtrace-text.js
@@ -80,9 +80,9 @@
// 1: Call distance on Point where distance is a direct property
// 2: Call on function an array element 2
// 3: [anonymous]
- assertEquals("#<a Point>.distanceTo(p=#<a Point>)", exec_state.frame(0).invocationText());
- assertEquals("#<a Point>.distanceTo(p=#<a Point>)", exec_state.frame(1).invocationText());
- assertEquals("#<an Array>[2](aka distance)(p=#<a Point>, q=#<a Point>)", exec_state.frame(2).invocationText());
+ assertEquals("#<Point>.distanceTo(p=#<Point>)", exec_state.frame(0).invocationText());
+ assertEquals("#<Point>.distanceTo(p=#<Point>)", exec_state.frame(1).invocationText());
+ assertEquals("#<Array>[2](aka distance)(p=#<Point>, q=#<Point>)", exec_state.frame(2).invocationText());
assertEquals("[anonymous]()", exec_state.frame(3).invocationText());
listenerCalled = true;
} else {
diff --git a/test/mjsunit/delete-global-properties.js b/test/mjsunit/delete-global-properties.js
index b3813dc..2acf591 100644
--- a/test/mjsunit/delete-global-properties.js
+++ b/test/mjsunit/delete-global-properties.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,17 @@
assertTrue("tmp" in this);
function f() { return 1; }
assertFalse(delete f); // should be DONT_DELETE
-assertEquals(1, f());
+assertEquals(1, f());
-/* Perhaps related to bugs/11? */
+// Check that deleting and reintroducing global variables works.
+// Get into the IC case for storing to a deletable global property.
+function introduce_x() { x = 42; }
+for (var i = 0; i < 10; i++) introduce_x();
+// Check that the property has been introduced.
+assertTrue(this.hasOwnProperty('x'));
+// Check that deletion works.
+delete x;
+assertFalse(this.hasOwnProperty('x'));
+// Check that reintroduction works.
+introduce_x();
+assertTrue(this.hasOwnProperty('x'));
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index ceb7715..79c1fac 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -103,3 +103,19 @@
objWithProto[0] = 'bar';
var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
assertEquals(undefined, descWithProto);
+
+// Test elements on global proxy object.
+var global = (function() { return this; })();
+
+global[42] = 42;
+
+function el_getter() { return 239; };
+function el_setter() {};
+Object.defineProperty(global, '239', {get: el_getter, set: el_setter});
+
+var descRegularElement = Object.getOwnPropertyDescriptor(global, '42');
+assertEquals(42, descRegularElement.value);
+
+var descAccessorElement = Object.getOwnPropertyDescriptor(global, '239');
+assertEquals(el_getter, descAccessorElement.get);
+assertEquals(el_setter, descAccessorElement.set);
diff --git a/test/mjsunit/getter-in-prototype.js b/test/mjsunit/getter-in-prototype.js
index dd26c53..5563123 100644
--- a/test/mjsunit/getter-in-prototype.js
+++ b/test/mjsunit/getter-in-prototype.js
@@ -31,9 +31,11 @@
var o = {};
var p = {};
p.__defineGetter__('x', function(){});
+p.__defineGetter__(0, function(){});
o.__proto__ = p;
assertThrows("o.x = 42");
+assertThrows("o[0] = 42");
function f() {
with(o) {
@@ -48,3 +50,9 @@
x = 42;
}
assertThrows("g()");
+
+__proto__ = p;
+function g2() {
+ this[0] = 42;
+}
+assertThrows("g2()");
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index a0be8dd..812ffeb 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -415,3 +415,17 @@
falseNum.__proto__ = Number.prototype;
falseNum.toString = function() { return 42; };
assertEquals('"42"', JSON.stringify(falseNum));
+
+// We don't currently allow plain properties called __proto__ in JSON
+// objects in JSON.parse. Instead we read them as we would JS object
+// literals. If we change that, this test should change with it.
+//
+// Parse a non-object value as __proto__. This must not create a
+// __proto__ property different from the original, and should not
+// change the original.
+var o = JSON.parse('{"__proto__":5}');
+assertEquals(Object.prototype, o.__proto__); // __proto__ isn't changed.
+assertEquals(0, Object.keys(o).length); // __proto__ isn't added as enumerable.
+
+
+
diff --git a/test/mjsunit/math-pow.js b/test/mjsunit/math-pow.js
index e732955..30d0cbd 100644
--- a/test/mjsunit/math-pow.js
+++ b/test/mjsunit/math-pow.js
@@ -58,10 +58,11 @@
assertEquals(Infinity, Math.pow(2, Infinity));
assertEquals(Infinity, Math.pow(-2, Infinity));
-assertEquals(+0, Math.pow(1.1, -Infinity));
-assertEquals(+0, Math.pow(-1.1, -Infinity));
-assertEquals(+0, Math.pow(2, -Infinity));
-assertEquals(+0, Math.pow(-2, -Infinity));
+// Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity.
+assertEquals(+Infinity, 1/Math.pow(1.1, -Infinity));
+assertEquals(+Infinity, 1/Math.pow(-1.1, -Infinity));
+assertEquals(+Infinity, 1/Math.pow(2, -Infinity));
+assertEquals(+Infinity, 1/Math.pow(-2, -Infinity));
assertEquals(NaN, Math.pow(1, Infinity));
assertEquals(NaN, Math.pow(1, -Infinity));
@@ -81,8 +82,8 @@
assertEquals(Infinity, Math.pow(Infinity, 0.1));
assertEquals(Infinity, Math.pow(Infinity, 2));
-assertEquals(+0, Math.pow(Infinity, -0.1));
-assertEquals(+0, Math.pow(Infinity, -2));
+assertEquals(+Infinity, 1/Math.pow(Infinity, -0.1));
+assertEquals(+Infinity, 1/Math.pow(Infinity, -2));
assertEquals(-Infinity, Math.pow(-Infinity, 3));
assertEquals(-Infinity, Math.pow(-Infinity, 13));
@@ -90,23 +91,23 @@
assertEquals(Infinity, Math.pow(-Infinity, 3.1));
assertEquals(Infinity, Math.pow(-Infinity, 2));
-assertEquals(-0, Math.pow(-Infinity, -3));
-assertEquals(-0, Math.pow(-Infinity, -13));
+assertEquals(-Infinity, 1/Math.pow(-Infinity, -3));
+assertEquals(-Infinity, 1/Math.pow(-Infinity, -13));
-assertEquals(+0, Math.pow(-Infinity, -3.1));
-assertEquals(+0, Math.pow(-Infinity, -2));
+assertEquals(+Infinity, 1/Math.pow(-Infinity, -3.1));
+assertEquals(+Infinity, 1/Math.pow(-Infinity, -2));
-assertEquals(+0, Math.pow(+0, 1.1));
-assertEquals(+0, Math.pow(+0, 2));
+assertEquals(+Infinity, 1/Math.pow(+0, 1.1));
+assertEquals(+Infinity, 1/Math.pow(+0, 2));
assertEquals(Infinity, Math.pow(+0, -1.1));
assertEquals(Infinity, Math.pow(+0, -2));
-assertEquals(-0, Math.pow(-0, 3));
-assertEquals(-0, Math.pow(-0, 13));
+assertEquals(-Infinity, 1/Math.pow(-0, 3));
+assertEquals(-Infinity, 1/Math.pow(-0, 13));
-assertEquals(+0, Math.pow(-0, 3.1));
-assertEquals(+0, Math.pow(-0, 2));
+assertEquals(+Infinity, 1/Math.pow(-0, 3.1));
+assertEquals(+Infinity, 1/Math.pow(-0, 2));
assertEquals(-Infinity, Math.pow(-0, -3));
assertEquals(-Infinity, Math.pow(-0, -13));
@@ -123,6 +124,18 @@
assertEquals(NaN, Math.pow(-1000, 1.1));
assertEquals(NaN, Math.pow(-1000, -1.1));
+assertEquals(+Infinity, 1/Math.pow(-0, 0.5));
+assertEquals(+Infinity, 1/Math.pow(-0, 0.6));
+assertEquals(-Infinity, 1/Math.pow(-0, 1));
+assertEquals(-Infinity, 1/Math.pow(-0, 10000000001));
+
+assertEquals(+Infinity, Math.pow(-0, -0.5));
+assertEquals(+Infinity, Math.pow(-0, -0.6));
+assertEquals(-Infinity, Math.pow(-0, -1));
+assertEquals(-Infinity, Math.pow(-0, -10000000001));
+
+
+
// Tests from Sputnik S8.5_A13_T1.
assertTrue((1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
assertTrue((1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
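The rewritten assertions in this file all use the same 1/x idiom to observe the sign of zero. The sign rules being pinned down are that pow with a negative odd integer exponent preserves the sign of a zero or infinite base, while an even exponent yields +0. Two helpers capturing the comparison, given as an illustration only (the helper names are made up):

    function assertMinusZero(x) { assertEquals(-Infinity, 1 / x); }
    function assertPlusZero(x) { assertEquals(+Infinity, 1 / x); }

    assertMinusZero(Math.pow(-Infinity, -3));  // odd exponent: result is -0
    assertPlusZero(Math.pow(-Infinity, -2));   // even exponent: result is +0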
diff --git a/test/mjsunit/mirror-error.js b/test/mjsunit/mirror-error.js
index 4ed8c1b..9fea17c 100644
--- a/test/mjsunit/mirror-error.js
+++ b/test/mjsunit/mirror-error.js
@@ -76,7 +76,7 @@
}
assertTrue(found_message, 'Property message not found');
}
-
+
// Check the formatted text (regress 1231579).
assertEquals(fromJSON.text, e.toString(), 'toString');
}
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 39ddf5a..4012799 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -105,6 +105,10 @@
regress/regress-3218915: SKIP
regress/regress-3247124: SKIP
+# Requires a bigger stack size during Genesis, and if the stack size is
+# increased, the test takes too much time to run. However, the problem the
+# test covers should be platform-independent.
+regress/regress-1132: SKIP
##############################################################################
[ $arch == arm && $crankshaft ]
@@ -112,6 +116,8 @@
# Test that currently fails with crankshaft on ARM.
compiler/simple-osr: FAIL
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
##############################################################################
[ $arch == x64 && $crankshaft ]
@@ -119,9 +125,8 @@
# BUG (1026) This test is currently flaky.
compiler/simple-osr: SKIP
-# BUG(1049): Currently no deoptimization support.
-debug-liveedit-newsource: SKIP
-debug-liveedit-1: SKIP
+# BUG (1094)
+regress/regress-deopt-gc: SKIP
##############################################################################
[ $arch == mips ]
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index d24a4e5..a8a3213 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -749,14 +749,33 @@
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property.
try {
Object.defineProperty(obj6, '2', descElement);
+ desc = Object.getOwnPropertyDescriptor(obj6, '2');
+ assertEquals(desc.value, 'foobar');
+} catch (e) {
+ assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+ var descAccessor = { get: function() { return 0; } };
+ Object.defineProperty(obj6, '2', descAccessor);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
+Object.defineProperty(obj6, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(obj6, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
Object.defineProperty(obj6, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(obj6, '3');
assertEquals(desc.value, 'foofoo');
@@ -827,14 +846,33 @@
assertTrue(desc.enumerable);
assertFalse(desc.configurable);
-// Ensure that we can't overwrite the non configurable element.
+// Can use defineProperty to change the value of a non-configurable
+// property of an array.
try {
Object.defineProperty(arr, '2', descElement);
+ desc = Object.getOwnPropertyDescriptor(arr, '2');
+ assertEquals(desc.value, 'foobar');
+} catch (e) {
+ assertUnreachable();
+}
+
+// Ensure that we can't change the descriptor of a
+// non-configurable property.
+try {
+ var descAccessor = { get: function() { return 0; } };
+ Object.defineProperty(arr, '2', descAccessor);
assertUnreachable();
} catch (e) {
assertTrue(/Cannot redefine property/.test(e));
}
+Object.defineProperty(arr, '2', descElementNonWritable);
+desc = Object.getOwnPropertyDescriptor(arr, '2');
+assertEquals(desc.value, 'foofoo');
+assertFalse(desc.writable);
+assertTrue(desc.enumerable);
+assertFalse(desc.configurable);
+
Object.defineProperty(arr, '3', descElementNonWritable);
desc = Object.getOwnPropertyDescriptor(arr, '3');
assertEquals(desc.value, 'foofoo');
@@ -898,3 +936,98 @@
assertEquals(undefined, o.x);
o.x = 37;
assertEquals(undefined, o.x);
+
+function testDefineProperty(obj, propertyName, desc, resultDesc) {
+ Object.defineProperty(obj, propertyName, desc);
+ var actualDesc = Object.getOwnPropertyDescriptor(obj, propertyName);
+ assertEquals(resultDesc.enumerable, actualDesc.enumerable);
+ assertEquals(resultDesc.configurable, actualDesc.configurable);
+ if (resultDesc.hasOwnProperty('value')) {
+ assertEquals(resultDesc.value, actualDesc.value);
+ assertEquals(resultDesc.writable, actualDesc.writable);
+ assertFalse(resultDesc.hasOwnProperty('get'));
+ assertFalse(resultDesc.hasOwnProperty('set'));
+ } else {
+ assertEquals(resultDesc.get, actualDesc.get);
+ assertEquals(resultDesc.set, actualDesc.set);
+ assertFalse(resultDesc.hasOwnProperty('value'));
+ assertFalse(resultDesc.hasOwnProperty('writable'));
+ }
+}
+
+// Tests redefining an existing property with a generic descriptor.
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { configurable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false },
+ { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { configurable : false },
+ { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true, configurable : true },
+ { value : 42, writable : true, enumerable : true, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : true },
+ { value : 42, writable : true, enumerable : false, configurable : true });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : true, configurable : false },
+ { value : 42, writable : true, enumerable : true, configurable : false });
+
+o = { p : 42 };
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { value : 42, writable : true, enumerable : false, configurable : false });
+
+// can make a writable, non-configurable field non-writable
+o = { p : 42 };
+Object.defineProperty(o, 'p', { configurable: false });
+testDefineProperty(o, 'p',
+ { writable: false },
+ { value : 42, writable : false, enumerable : true, configurable : false });
+
+// redefine of get only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { get : getter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: getter1, set: undefined, enumerable : false, configurable : false });
+
+// redefine of get/set only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { get: getter1, set: setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: getter1, set: setter1, enumerable : false, configurable : false });
+
+// redefine of set only property with generic descriptor
+o = {};
+Object.defineProperty(o, 'p',
+ { set : setter1, enumerable: true, configurable: true });
+testDefineProperty(o, 'p',
+ { enumerable : false, configurable : false },
+ { get: undefined, set: setter1, enumerable : false, configurable : false });
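The relaxed expectations earlier in this file follow ES5 8.12.9: a non-configurable but still writable data property may have its [[Value]] changed and may be made non-writable, while any change that reshapes the descriptor, such as converting it into an accessor, throws a TypeError. A condensed sketch of that boundary (not part of the commit):

    var obj = {};
    Object.defineProperty(obj, 'p', { value: 1, writable: true, configurable: false });
    Object.defineProperty(obj, 'p', { value: 2 });         // allowed: still writable
    Object.defineProperty(obj, 'p', { writable: false });  // allowed: writable to non-writable
    assertThrows(function() {                              // TypeError: data to accessor
      Object.defineProperty(obj, 'p', { get: function() { return 0; } });
    });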
diff --git a/test/mjsunit/regress/regress-1060.js b/test/mjsunit/regress/regress-1060.js
new file mode 100644
index 0000000..8abe178
--- /dev/null
+++ b/test/mjsunit/regress/regress-1060.js
@@ -0,0 +1,32 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure that we do not record multiple bailouts in the unoptimized code
+// for the (shared) .arguments proxy, even for calls.
+function f(x) { arguments; return x() + x(); }
+
+assertEquals("hesthest", f(function () { return "hest"; }));
diff --git a/test/mjsunit/regress/regress-1079.js b/test/mjsunit/regress/regress-1079.js
new file mode 100644
index 0000000..f3f3ce5
--- /dev/null
+++ b/test/mjsunit/regress/regress-1079.js
@@ -0,0 +1,45 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Getting the arguments property of an optimized function should not crash,
+// even if called through our optimized version of Function.prototype.apply.
+
+function optimized() {
+ return unoptimized.apply(null, arguments);
+}
+
+// It's not crucial that this is unoptimized.
+function unoptimized() {
+ with ({}) {
+ return optimized.arguments;
+ }
+}
+
+for (var i = 0; i < 100000; ++i) {
+ assertEquals(3, optimized(1, 2, 3).length);
+}
+
diff --git a/test/mjsunit/regress/regress-1083.js b/test/mjsunit/regress/regress-1083.js
new file mode 100644
index 0000000..d231899
--- /dev/null
+++ b/test/mjsunit/regress/regress-1083.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that changing the generic descriptor flags on a property
+// on the global object doesn't break invariants.
+Object.defineProperty(this, 'Object', {enumerable:true});
+
+var desc = Object.getOwnPropertyDescriptor(this, 'Object');
+assertTrue(desc.enumerable);
+assertTrue(desc.configurable);
+assertFalse(desc.hasOwnProperty('get'));
+assertFalse(desc.hasOwnProperty('set'));
+assertTrue(desc.hasOwnProperty('value'));
+assertTrue(desc.writable);
diff --git a/test/mjsunit/regress/regress-1092.js b/test/mjsunit/regress/regress-1092.js
new file mode 100644
index 0000000..0b29231
--- /dev/null
+++ b/test/mjsunit/regress/regress-1092.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that CodeGenerator::EmitKeyedPropertyAssignment for the start
+// of an initialization block doesn't normalize the properties of the
+// JSGlobalProxy.
+this.w = 0;
+this.x = 1;
+this.y = 2;
+this.z = 3;
+
diff --git a/test/mjsunit/regress/regress-1099.js b/test/mjsunit/regress/regress-1099.js
new file mode 100644
index 0000000..0ed6ede
--- /dev/null
+++ b/test/mjsunit/regress/regress-1099.js
@@ -0,0 +1,46 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the LApplyArguments lithium instruction restores the context after the call.
+
+function X() {
+ var slot = "foo"; return function (a) { return slot === a; }
+}
+
+function Y(x) {
+ var slot = "bar";
+ return function (a) {
+ x.apply(this, arguments);
+ return slot === 'bar';
+ };
+}
+
+var y = Y(X());
+
+for (var i = 0; i < 1000000; i++) {
+ assertTrue(y("foo"));
+}
diff --git a/test/mjsunit/regress/regress-1103.js b/test/mjsunit/regress/regress-1103.js
new file mode 100644
index 0000000..4ad25b3
--- /dev/null
+++ b/test/mjsunit/regress/regress-1103.js
@@ -0,0 +1,32 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that freezing the global object works correctly and does not
+// freeze the global proxy.
+
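+// At the top level 'this' is the global proxy, so the freeze must be
+// forwarded to the underlying global object.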
+var obj = this;
+obj = Object.freeze(obj);
diff --git a/test/mjsunit/regress/regress-1104.js b/test/mjsunit/regress/regress-1104.js
new file mode 100644
index 0000000..aca0a66
--- /dev/null
+++ b/test/mjsunit/regress/regress-1104.js
@@ -0,0 +1,37 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A redeclaration of a variable that aliases a parameter and so rewrites to
+// an arguments object access should not record duplicate AST IDs for
+// bailout.
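+// Both inner declarations of 'f' shadow the parameter; since 'arguments'
+// is used, they rewrite to arguments object accesses.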
+function test(f) {
+ function f() {}
+ function f() {}
+ return arguments;
+}
+
+test();
diff --git a/test/mjsunit/regress/regress-1105.js b/test/mjsunit/regress/regress-1105.js
new file mode 100644
index 0000000..cfe2bd3
--- /dev/null
+++ b/test/mjsunit/regress/regress-1105.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This should properly catch the exception from the setter triggered by
+// the eval'd function declaration, and it should not fail an assertion in
+// debug mode.
+
+__defineSetter__("x", function(){ throw 42; });
+
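+// Declaring function x via eval assigns to the global 'x', which triggers
+// the throwing setter above.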
+try {
+ this.eval('function x(){}');
+ assertUnreachable();
+} catch (e) {
+ assertEquals(42, e);
+}
diff --git a/test/mjsunit/regress/regress-1106.js b/test/mjsunit/regress/regress-1106.js
new file mode 100644
index 0000000..382fd1b
--- /dev/null
+++ b/test/mjsunit/regress/regress-1106.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test for issue 1106, where the optimizing compiler broke when accessing
+// a property lying on a prototype of the global object, and that prototype
+// object was in dictionary mode.
+
+x = Object.prototype;
+x.foo = 3;
+x.bar = 4;
+delete x.foo;
+x.foo = 5;
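+// Deleting and re-adding a property above puts Object.prototype into
+// dictionary (slow) mode.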
+
+function f() { return foo; }
+
+for (var i = 0; i < 100000; ++i) {
+ assertEquals(5, f());
+}
+
+// Test calls on functions defined in the prototype of the global object.
+x.gee = function() { return 42; };
+function g() { return gee(); }
+
+for (var i = 0; i < 100000; ++i) {
+ assertEquals(42, g());
+}
diff --git a/test/mjsunit/regress/regress-1107.js b/test/mjsunit/regress/regress-1107.js
new file mode 100644
index 0000000..4ba277a
--- /dev/null
+++ b/test/mjsunit/regress/regress-1107.js
@@ -0,0 +1,32 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that even if we cannot set element 0 on all the objects, we still
+// can format exception messages to some extent.
+
+Object.prototype.__defineGetter__(0, function(){});
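+// Evaluating the undeclared 'x' throws a ReferenceError; its message must
+// still be formatted despite the accessor on element 0.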
+assertThrows("x");
diff --git a/test/mjsunit/regress/regress-1110.js b/test/mjsunit/regress/regress-1110.js
new file mode 100644
index 0000000..204a87b
--- /dev/null
+++ b/test/mjsunit/regress/regress-1110.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that an illegal continue statement is reported as a SyntaxError at
+// parse time.
+
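+// A 'continue' outside any loop is a SyntaxError and must be reported at
+// parse time, before the function is ever called.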
+try {
+ function Crash() { continue;if (Crash) {
+ } }
+ Crash();
+ assertTrue(false);
+} catch (e) {
+ assertTrue(e instanceof SyntaxError);
+ assertTrue(/continue/.test(e.message));
+}
diff --git a/test/mjsunit/regress/regress-1112.js b/test/mjsunit/regress/regress-1112.js
new file mode 100644
index 0000000..d780106
--- /dev/null
+++ b/test/mjsunit/regress/regress-1112.js
@@ -0,0 +1,36 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test making sure that defineProperty on the global proxy
+// defines the property on the global object.
+
+Object.defineProperty(this,
+ 1,
+ { configurable: true, enumerable: true, value: 3 });
+assertEquals(3, this[1]);
+assertTrue(this.hasOwnProperty("1"));
+
diff --git a/test/mjsunit/regress/regress-1117.js b/test/mjsunit/regress/regress-1117.js
new file mode 100644
index 0000000..b013a22
--- /dev/null
+++ b/test/mjsunit/regress/regress-1117.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we return the right value (-0) when multiplying the constant 0
+// by a negative integer.
+
+function foo(y) { return 0 * y; }
+for (var i = 0; i < 1000000; i++) {
+ foo(42);
+}
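+// 1/-0 is -Infinity while 1/0 is Infinity, so the division distinguishes
+// -0 from +0.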
+assertEquals(1/foo(-42), -Infinity);
diff --git a/test/mjsunit/regress/regress-1118.js b/test/mjsunit/regress/regress-1118.js
new file mode 100644
index 0000000..84f96e4
--- /dev/null
+++ b/test/mjsunit/regress/regress-1118.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// An exception thrown in a function optimized by on-stack replacement (OSR)
+// should be able to construct a receiver from all optimized stack frames.
+
+function A() { }
+A.prototype.f = function() { };
+
+function B() { }
+
+var o = new A();
+
+// This function throws if o does not have an f property, and should not be
+// inlined.
+function g() { try { return o.f(); } finally { } }
+
+// This function should be optimized via OSR.
+function h() {
+  while (false);
+ for (var j = 0; j < 5000000; j++) g();
+}
+
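+// h is optimized via OSR during the first call; once o is replaced by a B
+// (which has no f), the call inside g throws from within optimized frames.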
+h();
+o = new B();
+assertThrows("h()");
diff --git a/test/mjsunit/regress/regress-1119.js b/test/mjsunit/regress/regress-1119.js
new file mode 100644
index 0000000..484893c
--- /dev/null
+++ b/test/mjsunit/regress/regress-1119.js
@@ -0,0 +1,45 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that properties declared at runtime with 'var' are intercepted by
+// JS accessors.
+
+__proto__.__defineSetter__("x", function() { hasBeenInvoked = true; });
+__proto__.__defineSetter__("y", function() { throw 'exception'; });
+
+var hasBeenInvoked = false;
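+// The try block below never throws, so the catch body never runs; it is
+// the runtime declaration of 'x' itself that must invoke the setter.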
+eval("try { } catch (e) { var x = false; }");
+assertTrue(hasBeenInvoked);
+
+var exception;
+try {
+ eval("try { } catch (e) { var y = false; }");
+ assertUnreachable();
+} catch (e) {
+ exception = e;
+}
+assertEquals('exception', exception);
diff --git a/test/mjsunit/regress/regress-1120.js b/test/mjsunit/regress/regress-1120.js
new file mode 100644
index 0000000..c8c06aa
--- /dev/null
+++ b/test/mjsunit/regress/regress-1120.js
@@ -0,0 +1,33 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that retrieving the extensible bit for the global object works
+// correctly and does not return the bit from the global proxy's map.
+
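+// 'this' is the global proxy, so Object.isExtensible must consult the
+// global object's map rather than the proxy's.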
+var obj = this;
+Object.freeze(obj);
+assertFalse(Object.isExtensible(obj));
diff --git a/test/mjsunit/regress/regress-1121.js b/test/mjsunit/regress/regress-1121.js
new file mode 100644
index 0000000..0ad29cc
--- /dev/null
+++ b/test/mjsunit/regress/regress-1121.js
@@ -0,0 +1,34 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=1121
+
+// Test that changing Array.prototype.__proto__ keeps Array functions working.
+
+Array.prototype.__proto__ = null;
+// pop has a custom call generator, so we need a beefier function.
+assertEquals([1, 2, 3], [1, 2, 3].slice());
diff --git a/test/mjsunit/regress/regress-1122.js b/test/mjsunit/regress/regress-1122.js
new file mode 100644
index 0000000..7dc9b24
--- /dev/null
+++ b/test/mjsunit/regress/regress-1122.js
@@ -0,0 +1,55 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can handle functions with up to 32766 arguments, and that
+// functions with more arguments throw an exception.
+
+// See http://code.google.com/p/v8/issues/detail?id=1122.
+
+function function_with_n_args(n) {
+  var test_prefix = 'prefix ';
+  var test_suffix = ' suffix';
+ var source = 'test_prefix + (function f(';
+ for (var arg = 0; arg < n ; arg++) {
+ if (arg != 0) source += ',';
+ source += 'arg' + arg;
+ }
+ source += ') { return arg' + (n - n % 2) / 2 + '; })(';
+ for (var arg = 0; arg < n ; arg++) {
+ if (arg != 0) source += ',';
+ source += arg;
+ }
+ source += ') + test_suffix';
+ return eval(source);
+}
+
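+// The generated function returns its middle argument; called with the
+// integers 0..n-1, that value is n/2 (for even n).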
+assertEquals('prefix 4000 suffix', function_with_n_args(8000));
+assertEquals('prefix 9000 suffix', function_with_n_args(18000));
+assertEquals('prefix 16000 suffix', function_with_n_args(32000));
+
+assertThrows("function_with_n_args(35000)");
+assertThrows("function_with_n_args(100000)");
diff --git a/test/mjsunit/regress/regress-1125.js b/test/mjsunit/regress/regress-1125.js
new file mode 100644
index 0000000..b0e1cb7
--- /dev/null
+++ b/test/mjsunit/regress/regress-1125.js
@@ -0,0 +1,41 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test a lot of updates to freshly created contexts.
+
+function f(x, y) {
+ with ("abcdefghijxxxxxxxxxx")
+ var y = {};
+}
+
+function g() {
+ f.apply(this, arguments);
+}
+
+for (var i = 0; i < 150000; i++) {
+ g(i);
+}
diff --git a/test/mjsunit/regress/regress-1126.js b/test/mjsunit/regress/regress-1126.js
new file mode 100644
index 0000000..303583b
--- /dev/null
+++ b/test/mjsunit/regress/regress-1126.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Evaluating a syntactically invalid program via eval should throw a
+// SyntaxError, and it should not fail an assertion in debug mode.
+
+try {
+ eval('--');
+ assertUnreachable();
+} catch (e) {
+}
diff --git a/test/mjsunit/regress/regress-1129.js b/test/mjsunit/regress/regress-1129.js
new file mode 100644
index 0000000..37bf9a8
--- /dev/null
+++ b/test/mjsunit/regress/regress-1129.js
@@ -0,0 +1,44 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --verify-heap --expose-gc
+
+// This should not hit an assertion in debug mode.
+
+// Create a RegExp that is syntactically correct but causes a stack overflow
+// during compilation.
+var source = Array(50000).join("(") + "a" + Array(50000).join(")");
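+// Nearly 50000 nested parentheses overflow the stack of the recursive
+// regexp compiler.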
+var r = RegExp(source);
+try {
+ // Try to compile in UC16 mode, and drop the exception.
+ r.test("\x80");
+ assertUnreachable();
+} catch (e) {
+}
+
+// Trigger a heap validation.
+gc();
diff --git a/test/mjsunit/regress/regress-1130.js b/test/mjsunit/regress/regress-1130.js
new file mode 100644
index 0000000..188f3f9
--- /dev/null
+++ b/test/mjsunit/regress/regress-1130.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that parser errors can be built up correctly even in the presence
+// of JS accessors on Object's prototype elements.
+
+Object.prototype.__defineGetter__(0, function() { throw 42; } );
+
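+// Constructing the error message for the redeclaration touches element 0,
+// which now has a throwing getter; message formatting must survive it.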
+try {
+ eval("(function() { const x; var x })")();
+ assertUnreachable();
+} catch (e) {
+ assertTrue(e instanceof TypeError);
+}
diff --git a/test/mjsunit/regress/regress-1131.js b/test/mjsunit/regress/regress-1131.js
new file mode 100644
index 0000000..a1af9c9
--- /dev/null
+++ b/test/mjsunit/regress/regress-1131.js
@@ -0,0 +1,29 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
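+// Sorting an array-like object that has a huge element index (0xf7da5000)
+// must terminate and not crash.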
+var nonArray = { length: 4, 0: 42, 2: 37, 0xf7da5000: undefined, 4: 0 };
+Array.prototype.sort.call(nonArray);
diff --git a/test/mjsunit/regress/regress-1132.js b/test/mjsunit/regress/regress-1132.js
new file mode 100644
index 0000000..4423ecd
--- /dev/null
+++ b/test/mjsunit/regress/regress-1132.js
@@ -0,0 +1,48 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the case where an exception is thrown from the parser while lazily
+// compiling a function.
+
+// Flags: --stack_size=32
+// NOTE: the stack size constant above has been chosen empirically.
+// If the test starts to fail in Genesis, consider increasing it.
+
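+// The unbounded recursion below exhausts the small stack, so an exception
+// is thrown while lazily compiling a function near the stack limit.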
+function test() {
+ try {
+ test(1, test(1));
+ } catch(e) {
+ assertFalse(delete e, "deleting catch variable");
+ assertEquals(42, e);
+ }
+}
+
+try {
+ test();
+ assertUnreachable();
+} catch (e) {
+}
diff --git a/test/mjsunit/regress/regress-1150.js b/test/mjsunit/regress/regress-1150.js
new file mode 100644
index 0000000..57f739a
--- /dev/null
+++ b/test/mjsunit/regress/regress-1150.js
@@ -0,0 +1,33 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that Object.keys is working correctly on the global object.
+
+var a = 10;
+var global = (function () { return this; }) ();
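+// In non-strict code a function called without a receiver has 'this' bound
+// to the global object.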
+var keys = Object.keys(global);
+assertTrue(keys.indexOf("a") >= 0);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3408144.js
new file mode 100644
index 0000000..6e292d6
--- /dev/null
+++ b/test/mjsunit/regress/regress-3408144.js
@@ -0,0 +1,37 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for incorrect code generation for alternations on ARM.
+
+// Flags: --nofull-compiler
+
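+// ('10'||10) evaluates to the truthy string '10'; '10' - 1 is 9, and
+// 0 > 9 is false.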
+function foo() {
+ return (0 > ("10"||10) - 1);
+}
+
+assertFalse(foo());
diff --git a/test/mjsunit/regress/regress-70066.js b/test/mjsunit/regress/regress-70066.js
new file mode 100644
index 0000000..b8386a7
--- /dev/null
+++ b/test/mjsunit/regress/regress-70066.js
@@ -0,0 +1,146 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for Chromium issue 70066. Delete should work properly
+// from inside 'with' scopes.
+// http://code.google.com/p/chromium/issues/detail?id=70066
+
+x = 0;
+
+// Delete on a slot from a function's own context.
+function test1() {
+ var value = 1;
+ var status;
+ with ({}) { status = delete value; }
+ return value + ":" + status;
+}
+
+assertEquals("1:false", test1(), "test1");
+assertEquals(0, x, "test1"); // Global x is undisturbed.
+
+
+// Delete on a slot from an outer context.
+function test2() {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var value = 2;
+ var status = f();
+ return value + ":" + status;
+}
+
+assertEquals("2:false", test2(), "test2");
+assertEquals(0, x, "test2"); // Global x is undisturbed.
+
+
+// Delete on an argument. This hits the same code paths as test5 because
+// 'with' forces all parameters to be indirected through the arguments
+// object.
+function test3(value) {
+ var status;
+ with ({}) { status = delete value; }
+ return value + ":" + status;
+}
+
+assertEquals("undefined:true", test3(3), "test3");
+assertEquals(0, x, "test3"); // Global x is undisturbed.
+
+
+// Delete on an argument from an outer context. This hits the same code
+// path as test2.
+function test4(value) {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var status = f();
+ return value + ":" + status;
+}
+
+assertEquals("4:false", test4(4), "test4");
+assertEquals(0, x, "test4"); // Global x is undisturbed.
+
+
+// Delete on an argument found in the arguments object. Such properties are
+// normally DONT_DELETE in JavaScript but deletion is allowed by V8.
+function test5(value) {
+ var status;
+ with ({}) { status = delete value; }
+ return arguments[0] + ":" + status;
+}
+
+assertEquals("undefined:true", test5(5), "test5");
+assertEquals(0, x, "test5"); // Global x is undisturbed.
+
+function test6(value) {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var status = f();
+ return arguments[0] + ":" + status;
+}
+
+assertEquals("undefined:true", test6(6), "test6");
+assertEquals(0, x, "test6"); // Global x is undisturbed.
+
+
+// Delete on a property found on 'with' object.
+function test7(object) {
+ with (object) { return delete value; }
+}
+
+var o = {value: 7};
+assertEquals(true, test7(o), "test7");
+assertEquals(void 0, o.value, "test7");
+assertEquals(0, x, "test7"); // Global x is undisturbed.
+
+
+// Delete on a global property.
+function test8() {
+ with ({}) { return delete x; }
+}
+
+assertEquals(true, test8(), "test8");
+assertThrows("x", "test8"); // Global x should be deleted.
+
+
+// Delete on a property that is not found anywhere.
+function test9() {
+ with ({}) { return delete x; }
+}
+
+assertThrows("x", "test9"); // Make sure it's not there.
+assertEquals(true, test9(), "test9");
+
+
+// Delete on a DONT_DELETE property of the global object.
+var y = 10;
+function test10() {
+ with ({}) { return delete y; }
+}
+
+assertEquals(false, test10(), "test10");
+assertEquals(10, y, "test10");
diff --git a/test/mjsunit/regress/regress-71647.js b/test/mjsunit/regress/regress-71647.js
new file mode 100644
index 0000000..4451011
--- /dev/null
+++ b/test/mjsunit/regress/regress-71647.js
@@ -0,0 +1,34 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
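+// Regression test: comparing a typeof result against a string loaded from
+// a global variable must work correctly in a hot loop.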
+var qe = 'object';
+
+function g() {
+ for (var i = 0; i < 10000; i++) typeof i === qe;
+}
+
+g();
diff --git a/test/mjsunit/regress/regress-900966.js b/test/mjsunit/regress/regress-900966.js
index acffe75..99603c1 100644
--- a/test/mjsunit/regress/regress-900966.js
+++ b/test/mjsunit/regress/regress-900966.js
@@ -37,6 +37,8 @@
}
f();
f();
+f();
+f();
assertTrue(2[11] === undefined);
Number.prototype[11] = 'y';
diff --git a/test/mjsunit/regress/regress-992.js b/test/mjsunit/regress/regress-992.js
new file mode 100644
index 0000000..dbe25a5
--- /dev/null
+++ b/test/mjsunit/regress/regress-992.js
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Object.defineProperty with a generic descriptor on an existing property
+// should just update the enumerable/configurable flags.
+
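+// Only 'enumerable' is supplied in the call below; the getter, the setter
+// slot and configurability must remain unchanged.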
+var obj = { get p() { return 42; } };
+var desc = Object.getOwnPropertyDescriptor(obj, 'p');
+var getter = desc.get;
+
+Object.defineProperty(obj, 'p', {enumerable: false });
+assertEquals(obj.p, 42);
+desc = Object.getOwnPropertyDescriptor(obj, 'p');
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertEquals(desc.get, getter);
+assertEquals(desc.set, undefined);
+assertFalse(desc.hasOwnProperty('value'));
+assertFalse(desc.hasOwnProperty('writable'));
diff --git a/test/mjsunit/regress/regress-deopt-gc.js b/test/mjsunit/regress/regress-deopt-gc.js
new file mode 100644
index 0000000..7b7c29a
--- /dev/null
+++ b/test/mjsunit/regress/regress-deopt-gc.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// This tests that we can correctly handle a GC immediately after a function
+// has been deoptimized, even when we have an activation of this function on
+// the stack.
+
+// Ensure that there are code objects before the code for the opt_me
+// function.
+(function() { var a = 10; a++; })();
+
+function opt_me() {
+ deopt();
+}
+
+function deopt() {
+  // Make sure we don't inline this function.
+ try { var a = 42; } catch(o) {};
+ %DeoptimizeFunction(opt_me);
+ gc(true);
+}
+
+
+opt_me();
diff --git a/test/mjsunit/strict-mode-eval.js b/test/mjsunit/strict-mode-eval.js
new file mode 100644
index 0000000..018ed9e
--- /dev/null
+++ b/test/mjsunit/strict-mode-eval.js
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
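+// Each codeN below is legal in non-strict code but a SyntaxError in strict
+// code; the nested strictN functions check that a direct eval inherits the
+// caller's strictness at every nesting depth.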
+var code1 = "function f(eval) {}";
+var code2 = "function f(a, a) {}";
+var code3 = "var x = '\\020;'";
+var code4 = "function arguments() {}";
+
+// Verify that the code compiles just fine in non-strict mode
+// (an aliased, i.e. indirect, eval evaluates its argument as non-strict code).
+var eval_alias = eval;
+
+eval_alias(code1);
+eval_alias(code2);
+eval_alias(code3);
+eval_alias(code4);
+
+function strict1() {
+ try {
+ eval(code1);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict2() {
+ try {
+ eval(code2);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict3() {
+ try {
+ eval(code3);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+
+ function strict4() {
+ try {
+ eval(code4);
+ assertUnreachable("did not throw exception");
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ }
+ }
+ strict4();
+ }
+ strict3();
+ }
+ strict2();
+}
+strict1();
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
new file mode 100644
index 0000000..6b775fc
--- /dev/null
+++ b/test/mjsunit/strict-mode.js
@@ -0,0 +1,376 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
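+// Helper: 'code' must compile in non-strict mode but throw 'exception' when
+// preceded by a "use strict" directive, both at the top level and when
+// nested inside a strict outer function.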
+function CheckStrictMode(code, exception) {
+ assertDoesNotThrow(code);
+ assertThrows("'use strict';\n" + code, exception);
+ assertThrows('"use strict";\n' + code, exception);
+ assertDoesNotThrow("\
+ function outer() {\
+ function inner() {\n"
+ + code +
+ "\n}\
+ }");
+ assertThrows("\
+ function outer() {\
+ 'use strict';\
+ function inner() {\n"
+ + code +
+ "\n}\
+ }", exception);
+}
+
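+// Helper: calls Function(arg1, ..., argN, body) with the given parameter
+// names; an empty (non-strict) body must compile, while a body carrying a
+// "use strict" directive must throw.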
+function CheckFunctionConstructorStrictMode() {
+ var args = [];
+ for (var i = 0; i < arguments.length; i ++) {
+ args[i] = arguments[i];
+ }
+ // Create non-strict function. No exception.
+ args[arguments.length] = "";
+ assertDoesNotThrow(function() {
+ Function.apply(this, args);
+ });
+ // Create strict mode function. Exception expected.
+ args[arguments.length] = "'use strict';";
+ assertThrows(function() {
+ Function.apply(this, args);
+ }, SyntaxError);
+}
+
+// Incorrect 'use strict' directive: the escape sequence keeps the string
+// from being a directive.
+(function UseStrictEscape() {
+ "use\\x20strict";
+ with ({}) {};
+})();
+
+// 'use strict' in non-directive position.
+(function UseStrictNonDirective() {
+ void(0);
+ "use strict";
+ with ({}) {};
+})();
+
+// Multiple directives, including "use strict".
+assertThrows('\
+"directive 1";\
+"another directive";\
+"use strict";\
+"directive after strict";\
+"and one more";\
+with({}) {}', SyntaxError);
+
+// 'with' disallowed in strict mode.
+CheckStrictMode("with({}) {}", SyntaxError);
+
+// Function named 'eval'.
+CheckStrictMode("function eval() {}", SyntaxError);
+
+// Function named 'arguments'.
+CheckStrictMode("function arguments() {}", SyntaxError);
+
+// Function parameter named 'eval'.
+CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError);
+
+// Function parameter named 'arguments'.
+CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError);
+
+// Property accessor parameter named 'eval'.
+CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError);
+
+// Property accessor parameter named 'arguments'.
+CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError);
+
+// Duplicate function parameter name.
+CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError);
+
+// Function constructor: eval parameter name.
+CheckFunctionConstructorStrictMode("eval");
+
+// Function constructor: arguments parameter name.
+CheckFunctionConstructorStrictMode("arguments");
+
+// Function constructor: duplicate parameter name.
+CheckFunctionConstructorStrictMode("a", "b", "c", "b");
+CheckFunctionConstructorStrictMode("a,b,c,b");
+
+// catch(eval)
+CheckStrictMode("try{}catch(eval){};", SyntaxError);
+
+// catch(arguments)
+CheckStrictMode("try{}catch(arguments){};", SyntaxError);
+
+// var eval
+CheckStrictMode("var eval;", SyntaxError);
+
+// var arguments
+CheckStrictMode("var arguments;", SyntaxError);
+
+// Strict mode applies to the function in which the directive is used.
+assertThrows('\
+function foo(eval) {\
+ "use strict";\
+}', SyntaxError);
+
+// Strict mode doesn't affect the outer scope of strict code.
+(function NotStrict(eval) {
+ function Strict() {
+ "use strict";
+ }
+ with ({}) {};
+})();
+
+// Octal literal
+CheckStrictMode("var x = 012");
+CheckStrictMode("012");
+CheckStrictMode("'Hello octal\\032'");
+CheckStrictMode("function octal() { return 012; }");
+CheckStrictMode("function octal() { return '\\032'; }");
+
+(function ValidEscape() {
+ "use strict";
+ var x = '\0';
+ var y = "\0";
+})();
+
+// Octal escape in a directive preceding "use strict" is still rejected.
+assertThrows('\
+ function strict() {\
+ "octal\\032directive";\
+ "use strict";\
+ }', SyntaxError);
+
+// Duplicate data properties.
+CheckStrictMode("var x = { dupe : 1, nondupe: 3, dupe : 2 };", SyntaxError);
+CheckStrictMode("var x = { '1234' : 1, '2345' : 2, '1234' : 3 };", SyntaxError);
+CheckStrictMode("var x = { '1234' : 1, '2345' : 2, 1234 : 3 };", SyntaxError);
+CheckStrictMode("var x = { 3.14 : 1, 2.71 : 2, 3.14 : 3 };", SyntaxError);
+CheckStrictMode("var x = { 3.14 : 1, '3.14' : 2 };", SyntaxError);
+CheckStrictMode("var x = { 123: 1, 123.00000000000000000000000000000000000000000000000000000000000000000001 : 2 }", SyntaxError);
+
+// Non-conflicting data properties.
+(function StrictModeNonDuplicate() {
+ "use strict";
+ var x = { 123 : 1, "0123" : 2 };
+ var x = { 123: 1, '123.00000000000000000000000000000000000000000000000000000000000000000001' : 2 }
+})();
+
+// Two getters (non-strict)
+assertThrows("var x = { get foo() { }, get foo() { } };", SyntaxError);
+assertThrows("var x = { get foo(){}, get 'foo'(){}};", SyntaxError);
+assertThrows("var x = { get 12(){}, get '12'(){}};", SyntaxError);
+
+// Two setters (non-strict)
+assertThrows("var x = { set foo(v) { }, set foo(v) { } };", SyntaxError);
+assertThrows("var x = { set foo(v) { }, set 'foo'(v) { } };", SyntaxError);
+assertThrows("var x = { set 13(v) { }, set '13'(v) { } };", SyntaxError);
+
+// Setter and data (non-strict)
+assertThrows("var x = { foo: 'data', set foo(v) { } };", SyntaxError);
+assertThrows("var x = { set foo(v) { }, foo: 'data' };", SyntaxError);
+assertThrows("var x = { foo: 'data', set 'foo'(v) { } };", SyntaxError);
+assertThrows("var x = { set foo(v) { }, 'foo': 'data' };", SyntaxError);
+assertThrows("var x = { 'foo': 'data', set foo(v) { } };", SyntaxError);
+assertThrows("var x = { set 'foo'(v) { }, foo: 'data' };", SyntaxError);
+assertThrows("var x = { 'foo': 'data', set 'foo'(v) { } };", SyntaxError);
+assertThrows("var x = { set 'foo'(v) { }, 'foo': 'data' };", SyntaxError);
+assertThrows("var x = { 12: 1, set '12'(v){}};", SyntaxError);
+assertThrows("var x = { 12: 1, set 12(v){}};", SyntaxError);
+assertThrows("var x = { '12': 1, set '12'(v){}};", SyntaxError);
+assertThrows("var x = { '12': 1, set 12(v){}};", SyntaxError);
+
+// Getter and data (non-strict)
+assertThrows("var x = { foo: 'data', get foo() { } };", SyntaxError);
+assertThrows("var x = { get foo() { }, foo: 'data' };", SyntaxError);
+assertThrows("var x = { 'foo': 'data', get foo() { } };", SyntaxError);
+assertThrows("var x = { get 'foo'() { }, 'foo': 'data' };", SyntaxError);
+assertThrows("var x = { '12': 1, get '12'(){}};", SyntaxError);
+assertThrows("var x = { '12': 1, get 12(){}};", SyntaxError);
+
+// Assignment to eval or arguments
+CheckStrictMode("function strict() { eval = undefined; }", SyntaxError);
+CheckStrictMode("function strict() { arguments = undefined; }", SyntaxError);
+CheckStrictMode("function strict() { print(eval = undefined); }", SyntaxError);
+CheckStrictMode("function strict() { print(arguments = undefined); }", SyntaxError);
+CheckStrictMode("function strict() { var x = eval = undefined; }", SyntaxError);
+CheckStrictMode("function strict() { var x = arguments = undefined; }", SyntaxError);
+
+// Compound assignment to eval or arguments
+CheckStrictMode("function strict() { eval *= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { arguments /= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { print(eval %= undefined); }", SyntaxError);
+CheckStrictMode("function strict() { print(arguments %= undefined); }", SyntaxError);
+CheckStrictMode("function strict() { var x = eval += undefined; }", SyntaxError);
+CheckStrictMode("function strict() { var x = arguments -= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { eval <<= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { arguments >>= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { print(eval >>>= undefined); }", SyntaxError);
+CheckStrictMode("function strict() { print(arguments &= undefined); }", SyntaxError);
+CheckStrictMode("function strict() { var x = eval ^= undefined; }", SyntaxError);
+CheckStrictMode("function strict() { var x = arguments |= undefined; }", SyntaxError);
+
+// Postfix increment with eval or arguments
+CheckStrictMode("function strict() { eval++; }", SyntaxError);
+CheckStrictMode("function strict() { arguments++; }", SyntaxError);
+CheckStrictMode("function strict() { print(eval++); }", SyntaxError);
+CheckStrictMode("function strict() { print(arguments++); }", SyntaxError);
+CheckStrictMode("function strict() { var x = eval++; }", SyntaxError);
+CheckStrictMode("function strict() { var x = arguments++; }", SyntaxError);
+
+// Postfix decrement with eval or arguments
+CheckStrictMode("function strict() { eval--; }", SyntaxError);
+CheckStrictMode("function strict() { arguments--; }", SyntaxError);
+CheckStrictMode("function strict() { print(eval--); }", SyntaxError);
+CheckStrictMode("function strict() { print(arguments--); }", SyntaxError);
+CheckStrictMode("function strict() { var x = eval--; }", SyntaxError);
+CheckStrictMode("function strict() { var x = arguments--; }", SyntaxError);
+
+// Prefix increment with eval or arguments
+CheckStrictMode("function strict() { ++eval; }", SyntaxError);
+CheckStrictMode("function strict() { ++arguments; }", SyntaxError);
+CheckStrictMode("function strict() { print(++eval); }", SyntaxError);
+CheckStrictMode("function strict() { print(++arguments); }", SyntaxError);
+CheckStrictMode("function strict() { var x = ++eval; }", SyntaxError);
+CheckStrictMode("function strict() { var x = ++arguments; }", SyntaxError);
+
+// Prefix decrement with eval or arguments
+CheckStrictMode("function strict() { --eval; }", SyntaxError);
+CheckStrictMode("function strict() { --arguments; }", SyntaxError);
+CheckStrictMode("function strict() { print(--eval); }", SyntaxError);
+CheckStrictMode("function strict() { print(--arguments); }", SyntaxError);
+CheckStrictMode("function strict() { var x = --eval; }", SyntaxError);
+CheckStrictMode("function strict() { var x = --arguments; }", SyntaxError);
+
+// Prefix unary operators other than delete, ++ and -- may be applied to
+// eval and arguments in strict mode.
+(function StrictModeUnaryOperators() {
+ "use strict";
+ var x = [void eval, typeof eval, +eval, -eval, ~eval, !eval];
+ var y = [void arguments, typeof arguments,
+ +arguments, -arguments, ~arguments, !arguments];
+})();
+
+// 7.6.1.2 Future Reserved Words
+var future_reserved_words = [
+ "class",
+ "enum",
+ "export",
+ "extends",
+ "import",
+ "super",
+ "implements",
+ "interface",
+ "let",
+ "package",
+ "private",
+ "protected",
+ "public",
+ "static",
+ "yield" ];
+
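+// Each future reserved word is a normal identifier in non-strict code but
+// must be rejected as a binding name in strict code; it stays legal as a
+// property name in both modes.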
+function testFutureReservedWord(word) {
+ // Simple use of each reserved word
+ CheckStrictMode("var " + word + " = 1;", SyntaxError);
+
+ // object literal properties
+ eval("var x = { " + word + " : 42 };");
+ eval("var x = { get " + word + " () {} };");
+ eval("var x = { set " + word + " (value) {} };");
+
+ // object literal with string literal property names
+ eval("var x = { '" + word + "' : 42 };");
+ eval("var x = { get '" + word + "' () { } };");
+ eval("var x = { set '" + word + "' (value) { } };");
+ eval("var x = { get '" + word + "' () { 'use strict'; } };");
+ eval("var x = { set '" + word + "' (value) { 'use strict'; } };");
+
+ // Function names and arguments, strict and non-strict contexts
+ CheckStrictMode("function " + word + " () {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ", " + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (a, " + word + ") {}", SyntaxError);
+ CheckStrictMode("function foo (" + word + ", a) {}", SyntaxError);
+ CheckStrictMode("function foo (a, " + word + ", b) {}", SyntaxError);
+ CheckStrictMode("var foo = function (" + word + ") {}", SyntaxError);
+
+ // Function names and arguments when the body is strict
+ assertThrows("function " + word + " () { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (" + word + ") 'use strict'; {}", SyntaxError);
+ assertThrows("function foo (" + word + ", " + word + ") { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (a, " + word + ") { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (" + word + ", a) { 'use strict'; }", SyntaxError);
+ assertThrows("function foo (a, " + word + ", b) { 'use strict'; }", SyntaxError);
+ assertThrows("var foo = function (" + word + ") { 'use strict'; }", SyntaxError);
+
+ // get/set when the body is strict
+ eval("var x = { get " + word + " () { 'use strict'; } };");
+ eval("var x = { set " + word + " (value) { 'use strict'; } };");
+ assertThrows("var x = { get foo(" + word + ") { 'use strict'; } };", SyntaxError);
+ assertThrows("var x = { set foo(" + word + ") { 'use strict'; } };", SyntaxError);
+}
+
+for (var i = 0; i < future_reserved_words.length; i++) {
+ testFutureReservedWord(future_reserved_words[i]);
+}
+
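+// In strict mode, assigning to an undeclared variable must throw a
+// ReferenceError; should_throw is true while the global property does not
+// exist (once it exists, even with value undefined, assignment succeeds).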
+function testAssignToUndefined(should_throw) {
+ "use strict";
+ try {
+ possibly_undefined_variable_for_strict_mode_test = "should throw?";
+ } catch (e) {
+ assertTrue(should_throw, "strict mode");
+ assertInstanceof(e, ReferenceError, "strict mode");
+ return;
+ }
+ assertFalse(should_throw, "strict mode");
+}
+
+testAssignToUndefined(true);
+testAssignToUndefined(true);
+testAssignToUndefined(true);
+
+possibly_undefined_variable_for_strict_mode_test = "value";
+
+testAssignToUndefined(false);
+testAssignToUndefined(false);
+testAssignToUndefined(false);
+
+delete possibly_undefined_variable_for_strict_mode_test;
+
+testAssignToUndefined(true);
+testAssignToUndefined(true);
+testAssignToUndefined(true);
+
+function repeat(n, f) {
+ for (var i = 0; i < n; i ++) { f(); }
+}
+
+repeat(10, function() { testAssignToUndefined(true); });
+possibly_undefined_variable_for_strict_mode_test = "value";
+repeat(10, function() { testAssignToUndefined(false); });
+delete possibly_undefined_variable_for_strict_mode_test;
+repeat(10, function() { testAssignToUndefined(true); });
+possibly_undefined_variable_for_strict_mode_test = undefined;
+repeat(10, function() { testAssignToUndefined(false); });
diff --git a/test/mjsunit/string-charcodeat.js b/test/mjsunit/string-charcodeat.js
index 831f688..f18d0a5 100644
--- a/test/mjsunit/string-charcodeat.js
+++ b/test/mjsunit/string-charcodeat.js
@@ -153,6 +153,17 @@
TestStringType(Flat16, true);
TestStringType(NotAString16, true);
+
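+// charCodeAt on a cons string with an index too large for a small integer
+// (non-Smi) must return NaN; the loop runs long enough to exercise
+// optimized code.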
+function ConsNotSmiIndex() {
+ var str = Cons();
+ assertTrue(isNaN(str.charCodeAt(0x7fffffff)));
+}
+
+for (var i = 0; i < 100000; i++) {
+ ConsNotSmiIndex();
+}
+
+
for (var i = 0; i != 10; i++) {
assertEquals(101, Cons16().charCodeAt(1.1));
assertEquals('e', Cons16().charAt(1.1));
@@ -194,3 +205,23 @@
assertEquals(49, long.charCodeAt(0), 36);
assertEquals(56, long.charCodeAt(65535), 37);
assertTrue(isNaN(long.charCodeAt(65536)), 38);
+
+
+// Test crankshaft code when the function is set directly on the
+// string prototype object instead of the hidden prototype object.
+// See http://code.google.com/p/v8/issues/detail?id=1070
+
+String.prototype.x = String.prototype.charCodeAt;
+
+function directlyOnPrototype() {
+ assertEquals(97, "a".x(0));
+ assertEquals(98, "b".x(0));
+ assertEquals(99, "c".x(0));
+ assertEquals(97, "a".x(0));
+ assertEquals(98, "b".x(0));
+ assertEquals(99, "c".x(0));
+}
+
+for (var i = 0; i < 10000; i++) {
+ directlyOnPrototype();
+}
diff --git a/test/mjsunit/tools/codemap.js b/test/mjsunit/tools/codemap.js
index 06a91e8..81fb810 100644
--- a/test/mjsunit/tools/codemap.js
+++ b/test/mjsunit/tools/codemap.js
@@ -30,7 +30,7 @@
function newCodeEntry(size, name) {
- return new devtools.profiler.CodeMap.CodeEntry(size, name);
+ return new CodeMap.CodeEntry(size, name);
};
@@ -47,7 +47,7 @@
(function testLibrariesAndStaticCode() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
codeMap.addLibrary(0x1500, newCodeEntry(0x3000, 'lib1'));
codeMap.addLibrary(0x15500, newCodeEntry(0x5000, 'lib2'));
codeMap.addLibrary(0x155500, newCodeEntry(0x10000, 'lib3'));
@@ -97,7 +97,7 @@
(function testDynamicCode() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
@@ -123,7 +123,7 @@
(function testCodeMovesAndDeletions() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
assertEntry(codeMap, 'code1', 0x1500);
@@ -139,7 +139,7 @@
(function testDynamicNamesDuplicates() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
// Code entries with same names but different addresses.
codeMap.addCode(0x1500, newCodeEntry(0x200, 'code'));
codeMap.addCode(0x1700, newCodeEntry(0x100, 'code'));
@@ -152,7 +152,7 @@
(function testStaticEntriesExport() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
codeMap.addStaticCode(0x1500, newCodeEntry(0x3000, 'lib1'));
codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2'));
codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3'));
@@ -163,7 +163,7 @@
(function testDynamicEntriesExport() {
- var codeMap = new devtools.profiler.CodeMap();
+ var codeMap = new CodeMap();
codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
diff --git a/test/mjsunit/tools/csvparser.js b/test/mjsunit/tools/csvparser.js
index 6ac4908..f1449f6 100644
--- a/test/mjsunit/tools/csvparser.js
+++ b/test/mjsunit/tools/csvparser.js
@@ -28,7 +28,7 @@
// Load CSV parser implementation from <project root>/tools.
// Files: tools/csvparser.js
-var parser = new devtools.profiler.CsvParser();
+var parser = new CsvParser();
assertEquals(
[],
diff --git a/test/mjsunit/tools/profile.js b/test/mjsunit/tools/profile.js
index 9ed851b..4df1a08 100644
--- a/test/mjsunit/tools/profile.js
+++ b/test/mjsunit/tools/profile.js
@@ -58,7 +58,7 @@
function ProfileTestDriver() {
- this.profile = new devtools.profiler.Profile();
+ this.profile = new Profile();
this.stack_ = [];
this.addFunctions_();
};
diff --git a/test/mjsunit/tools/profile_view.js b/test/mjsunit/tools/profile_view.js
index 3ed1128..7f60119 100644
--- a/test/mjsunit/tools/profile_view.js
+++ b/test/mjsunit/tools/profile_view.js
@@ -30,7 +30,7 @@
function createNode(name, time, opt_parent) {
- var node = new devtools.profiler.ProfileView.Node(name, time, time, null);
+ var node = new ProfileView.Node(name, time, time, null);
if (opt_parent) {
opt_parent.addChild(node);
}
@@ -61,7 +61,7 @@
createNode('d', 4, b3);
createNode('d', 2, b3);
- var view = new devtools.profiler.ProfileView(root);
+ var view = new ProfileView(root);
var flatTree = [];
function fillFlatTree(node) {
diff --git a/test/mjsunit/tools/splaytree.js b/test/mjsunit/tools/splaytree.js
index 3beba0b..5e18796 100644
--- a/test/mjsunit/tools/splaytree.js
+++ b/test/mjsunit/tools/splaytree.js
@@ -30,7 +30,7 @@
(function testIsEmpty() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertTrue(tree.isEmpty());
tree.insert(0, 'value');
assertFalse(tree.isEmpty());
@@ -38,7 +38,7 @@
(function testExportValues() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertArrayEquals([], tree.exportValues());
tree.insert(0, 'value');
assertArrayEquals(['value'], tree.exportValues());
@@ -79,7 +79,7 @@
(function testSplay() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
tree.root_ = createSampleTree();
assertArrayEquals(['50', '30', '60', '10', '40', '90', '20', '70', '100', '15', '80'],
tree.exportValues());
@@ -93,7 +93,7 @@
(function testInsert() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
tree.insert(5, 'root');
tree.insert(3, 'left');
assertArrayEquals(['left', 'root'], tree.exportValues());
@@ -103,7 +103,7 @@
(function testFind() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
tree.insert(5, 'root');
tree.insert(3, 'left');
tree.insert(7, 'right');
@@ -117,7 +117,7 @@
(function testFindMin() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertEquals(null, tree.findMin());
tree.insert(5, 'root');
tree.insert(3, 'left');
@@ -127,7 +127,7 @@
(function testFindMax() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertEquals(null, tree.findMax());
tree.insert(5, 'root');
tree.insert(3, 'left');
@@ -137,7 +137,7 @@
(function testFindGreatestLessThan() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertEquals(null, tree.findGreatestLessThan(10));
tree.insert(5, 'root');
tree.insert(3, 'left');
@@ -151,7 +151,7 @@
(function testRemove() {
- var tree = new goog.structs.SplayTree();
+ var tree = new SplayTree();
assertThrows('tree.remove(5)');
tree.insert(5, 'root');
tree.insert(3, 'left');
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index a119bf2..3b6a524 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -110,7 +110,7 @@
js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
# http://b/issue?id=1206983
js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
-ecma/Date/15.9.5.10-2: PASS || FAIL if $mode == debug
+ecma/Date/15.9.5.10-2: PASS || (FAIL || TIMEOUT if $mode == debug)
# These tests create two Date objects just after each other and
# expects them to match. Sometimes this happens on the border
@@ -169,7 +169,6 @@
# In Denmark the adjustment starts one week earlier!.
# Tests based on shell that use dates in this gap are flaky.
ecma/Date/15.9.5.10-1: PASS || FAIL
-ecma/Date/15.9.5.10-2: PASS || TIMEOUT if ($arch == arm && $mode == debug)
ecma/Date/15.9.5.12-1: PASS || FAIL
ecma/Date/15.9.5.14: PASS || FAIL
ecma/Date/15.9.5.34-1: PASS || FAIL
@@ -198,6 +197,9 @@
js1_5/extensions/regress-363258: PASS || FAIL
+# Test that assumes a specific running time for a regexp; flaky in debug mode.
+ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
+
##################### INCOMPATIBLE TESTS #####################
@@ -818,20 +820,17 @@
js1_5/extensions/regress-342960: SKIP
# BUG(3251229): Times out when running new crankshaft test script.
-ecma/Date/15.9.5.12-2: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Date/15.9.5.10-2: SKIP
-ecma/Date/15.9.5.8: SKIP
ecma_3/RegExp/regress-311414: SKIP
-js1_5/Array/regress-99120-02: SKIP
-js1_5/Regress/regress-203278-1: SKIP
ecma/Date/15.9.5.8: SKIP
ecma/Date/15.9.5.10-2: SKIP
ecma/Date/15.9.5.11-2: SKIP
ecma/Date/15.9.5.12-2: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/extensions/regress-371636: SKIP
+js1_5/Regress/regress-203278-1: SKIP
js1_5/Regress/regress-404755: SKIP
js1_5/Regress/regress-451322: SKIP
-js1_5/extensions/regress-371636: SKIP
+
# BUG(1040): Allow this test to timeout.
js1_5/GC/regress-203278-2: PASS || TIMEOUT
@@ -862,7 +861,6 @@
ecma/Date/15.9.5.12-2: SKIP
ecma/Date/15.9.5.8: SKIP
ecma/Date/15.9.5.9: SKIP
-ecma/Date/15.9.5.10-2: SKIP
ecma/Date/15.9.5.11-2: SKIP
ecma/Expressions/11.7.2: SKIP
ecma/Expressions/11.10-2: SKIP
diff --git a/tools/codemap.js b/tools/codemap.js
index 8eb2acb..71a99cc 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -26,36 +26,31 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Initlialize namespaces
-var devtools = devtools || {};
-devtools.profiler = devtools.profiler || {};
-
-
/**
* Constructs a mapper that maps addresses into code entries.
*
* @constructor
*/
-devtools.profiler.CodeMap = function() {
+function CodeMap() {
/**
* Dynamic code entries. Used for JIT compiled code.
*/
- this.dynamics_ = new goog.structs.SplayTree();
+ this.dynamics_ = new SplayTree();
/**
* Name generator for entries having duplicate names.
*/
- this.dynamicsNameGen_ = new devtools.profiler.CodeMap.NameGenerator();
+ this.dynamicsNameGen_ = new CodeMap.NameGenerator();
/**
* Static code entries. Used for statically compiled code.
*/
- this.statics_ = new goog.structs.SplayTree();
+ this.statics_ = new SplayTree();
/**
* Libraries entries. Used for the whole static code libraries.
*/
- this.libraries_ = new goog.structs.SplayTree();
+ this.libraries_ = new SplayTree();
/**
* Map of memory pages occupied with static code.
@@ -67,23 +62,23 @@
/**
* The number of alignment bits in a page address.
*/
-devtools.profiler.CodeMap.PAGE_ALIGNMENT = 12;
+CodeMap.PAGE_ALIGNMENT = 12;
/**
* Page size in bytes.
*/
-devtools.profiler.CodeMap.PAGE_SIZE =
- 1 << devtools.profiler.CodeMap.PAGE_ALIGNMENT;
+CodeMap.PAGE_SIZE =
+ 1 << CodeMap.PAGE_ALIGNMENT;
/**
* Adds a dynamic (i.e. moveable and discardable) code entry.
*
* @param {number} start The starting address.
- * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
*/
-devtools.profiler.CodeMap.prototype.addCode = function(start, codeEntry) {
+CodeMap.prototype.addCode = function(start, codeEntry) {
this.dynamics_.insert(start, codeEntry);
};
@@ -95,7 +90,7 @@
* @param {number} from The starting address of the entry being moved.
* @param {number} to The destination address.
*/
-devtools.profiler.CodeMap.prototype.moveCode = function(from, to) {
+CodeMap.prototype.moveCode = function(from, to) {
var removedNode = this.dynamics_.remove(from);
this.dynamics_.insert(to, removedNode.value);
};
@@ -107,7 +102,7 @@
*
* @param {number} start The starting address of the entry being deleted.
*/
-devtools.profiler.CodeMap.prototype.deleteCode = function(start) {
+CodeMap.prototype.deleteCode = function(start) {
var removedNode = this.dynamics_.remove(start);
};
@@ -116,9 +111,9 @@
* Adds a library entry.
*
* @param {number} start The starting address.
- * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
*/
-devtools.profiler.CodeMap.prototype.addLibrary = function(
+CodeMap.prototype.addLibrary = function(
start, codeEntry) {
this.markPages_(start, start + codeEntry.size);
this.libraries_.insert(start, codeEntry);
@@ -129,9 +124,9 @@
* Adds a static code entry.
*
* @param {number} start The starting address.
- * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
*/
-devtools.profiler.CodeMap.prototype.addStaticCode = function(
+CodeMap.prototype.addStaticCode = function(
start, codeEntry) {
this.statics_.insert(start, codeEntry);
};
@@ -140,10 +135,10 @@
/**
* @private
*/
-devtools.profiler.CodeMap.prototype.markPages_ = function(start, end) {
+CodeMap.prototype.markPages_ = function(start, end) {
for (var addr = start; addr <= end;
- addr += devtools.profiler.CodeMap.PAGE_SIZE) {
- this.pages_[addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT] = 1;
+ addr += CodeMap.PAGE_SIZE) {
+ this.pages_[addr >>> CodeMap.PAGE_ALIGNMENT] = 1;
}
};
@@ -151,7 +146,7 @@
/**
* @private
*/
-devtools.profiler.CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
+CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
return addr >= node.key && addr < (node.key + node.value.size);
};
@@ -159,7 +154,7 @@
/**
* @private
*/
-devtools.profiler.CodeMap.prototype.findInTree_ = function(tree, addr) {
+CodeMap.prototype.findInTree_ = function(tree, addr) {
var node = tree.findGreatestLessThan(addr);
return node && this.isAddressBelongsTo_(addr, node) ? node.value : null;
};
@@ -171,8 +166,8 @@
*
* @param {number} addr Address.
*/
-devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
- var pageAddr = addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
+CodeMap.prototype.findEntry = function(addr) {
+ var pageAddr = addr >>> CodeMap.PAGE_ALIGNMENT;
if (pageAddr in this.pages_) {
// Static code entries can contain "holes" of unnamed code.
// In this case, the whole library is assigned to this address.
@@ -200,7 +195,7 @@
*
* @param {number} addr Address.
*/
-devtools.profiler.CodeMap.prototype.findDynamicEntryByStartAddress =
+CodeMap.prototype.findDynamicEntryByStartAddress =
function(addr) {
var node = this.dynamics_.find(addr);
return node ? node.value : null;
@@ -210,7 +205,7 @@
/**
* Returns an array of all dynamic code entries.
*/
-devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() {
+CodeMap.prototype.getAllDynamicEntries = function() {
return this.dynamics_.exportValues();
};
@@ -218,7 +213,7 @@
/**
* Returns an array of all static code entries.
*/
-devtools.profiler.CodeMap.prototype.getAllStaticEntries = function() {
+CodeMap.prototype.getAllStaticEntries = function() {
return this.statics_.exportValues();
};
@@ -226,7 +221,7 @@
/**
* Returns an array of all libraries entries.
*/
-devtools.profiler.CodeMap.prototype.getAllLibrariesEntries = function() {
+CodeMap.prototype.getAllLibrariesEntries = function() {
return this.libraries_.exportValues();
};
@@ -238,29 +233,29 @@
* @param {string} opt_name Code entry name.
* @constructor
*/
-devtools.profiler.CodeMap.CodeEntry = function(size, opt_name) {
+CodeMap.CodeEntry = function(size, opt_name) {
this.size = size;
this.name = opt_name || '';
this.nameUpdated_ = false;
};
-devtools.profiler.CodeMap.CodeEntry.prototype.getName = function() {
+CodeMap.CodeEntry.prototype.getName = function() {
return this.name;
};
-devtools.profiler.CodeMap.CodeEntry.prototype.toString = function() {
+CodeMap.CodeEntry.prototype.toString = function() {
return this.name + ': ' + this.size.toString(16);
};
-devtools.profiler.CodeMap.NameGenerator = function() {
+CodeMap.NameGenerator = function() {
this.knownNames_ = {};
};
-devtools.profiler.CodeMap.NameGenerator.prototype.getName = function(name) {
+CodeMap.NameGenerator.prototype.getName = function(name) {
if (!(name in this.knownNames_)) {
this.knownNames_[name] = 0;
return name;
diff --git a/tools/csvparser.js b/tools/csvparser.js
index 6e101e2..c7d46b5 100644
--- a/tools/csvparser.js
+++ b/tools/csvparser.js
@@ -26,15 +26,10 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Initlialize namespaces.
-var devtools = devtools || {};
-devtools.profiler = devtools.profiler || {};
-
-
/**
* Creates a CSV lines parser.
*/
-devtools.profiler.CsvParser = function() {
+function CsvParser() {
};
@@ -42,14 +37,14 @@
* A regex for matching a CSV field.
* @private
*/
-devtools.profiler.CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
+CsvParser.CSV_FIELD_RE_ = /^"((?:[^"]|"")*)"|([^,]*)/;
/**
* A regex for matching a double quote.
* @private
*/
-devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
+CsvParser.DOUBLE_QUOTE_RE_ = /""/g;
/**
@@ -57,9 +52,9 @@
*
* @param {string} line Input line.
*/
-devtools.profiler.CsvParser.prototype.parseLine = function(line) {
- var fieldRe = devtools.profiler.CsvParser.CSV_FIELD_RE_;
- var doubleQuoteRe = devtools.profiler.CsvParser.DOUBLE_QUOTE_RE_;
+CsvParser.prototype.parseLine = function(line) {
+ var fieldRe = CsvParser.CSV_FIELD_RE_;
+ var doubleQuoteRe = CsvParser.DOUBLE_QUOTE_RE_;
var pos = 0;
var endPos = line.length;
var fields = [];
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 024ecd7..1518567 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -32,6 +32,7 @@
'gcc_version%': 'unknown',
'v8_target_arch%': '<(target_arch)',
'v8_use_snapshot%': 'true',
+ 'v8_use_liveobjectlist%': 'false',
},
'conditions': [
['use_system_v8==0', {
@@ -66,6 +67,14 @@
}],
],
}],
+ ['v8_use_liveobjectlist=="true"', {
+ 'defines': [
+ 'ENABLE_DEBUGGER_SUPPORT',
+ 'INSPECTOR',
+ 'OBJECT_PRINT',
+ 'LIVEOBJECTLIST',
+ ],
+ }],
],
'configurations': {
'Debug': {
@@ -417,6 +426,8 @@
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
+ '../../src/inspector.cc',
+ '../../src/inspector.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
'../../src/jump-target-inl.h',
@@ -430,8 +441,12 @@
'../../src/lithium.h',
'../../src/lithium-allocator.cc',
'../../src/lithium-allocator.h',
+ '../../src/lithium-allocator-inl.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
+ '../../src/liveobjectlist-inl.h',
+ '../../src/liveobjectlist.cc',
+ '../../src/liveobjectlist.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -451,8 +466,6 @@
'../../src/objects-visiting.h',
'../../src/objects.cc',
'../../src/objects.h',
- '../../src/oprofile-agent.h',
- '../../src/oprofile-agent.cc',
'../../src/parser.cc',
'../../src/parser.h',
'../../src/platform.h',
@@ -677,6 +690,8 @@
'../../src/x64/jump-target-x64.cc',
'../../src/x64/lithium-codegen-x64.cc',
'../../src/x64/lithium-codegen-x64.h',
+ '../../src/x64/lithium-gap-resolver-x64.cc',
+ '../../src/x64/lithium-gap-resolver-x64.h',
'../../src/x64/lithium-x64.cc',
'../../src/x64/lithium-x64.h',
'../../src/x64/macro-assembler-x64.cc',
@@ -733,8 +748,7 @@
'sources': [
'../../src/platform-win32.cc',
],
- # 4355, 4800 came from common.vsprops
- 'msvs_disabled_warnings': [4355, 4800],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {
'libraries': [ '-lwinmm.lib' ],
},
diff --git a/tools/logreader.js b/tools/logreader.js
index 50e3aa4..315e721 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -29,10 +29,6 @@
* @fileoverview Log Reader is used to process log file produced by V8.
*/
-// Initlialize namespaces
-var devtools = devtools || {};
-devtools.profiler = devtools.profiler || {};
-
/**
* Base class for processing log files.
@@ -41,7 +37,7 @@
* log records.
* @constructor
*/
-devtools.profiler.LogReader = function(dispatchTable) {
+function LogReader(dispatchTable) {
/**
* @type {Array.<Object>}
*/
@@ -55,9 +51,9 @@
/**
* CSV lines parser.
- * @type {devtools.profiler.CsvParser}
+ * @type {CsvParser}
*/
- this.csvParser_ = new devtools.profiler.CsvParser();
+ this.csvParser_ = new CsvParser();
};
@@ -66,7 +62,7 @@
*
* @param {string} str Error message.
*/
-devtools.profiler.LogReader.prototype.printError = function(str) {
+LogReader.prototype.printError = function(str) {
// Do nothing.
};
@@ -76,7 +72,7 @@
*
* @param {string} chunk A portion of log.
*/
-devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
+LogReader.prototype.processLogChunk = function(chunk) {
this.processLog_(chunk.split('\n'));
};
@@ -86,7 +82,7 @@
*
* @param {string} line A line of log.
*/
-devtools.profiler.LogReader.prototype.processLogLine = function(line) {
+LogReader.prototype.processLogLine = function(line) {
this.processLog_([line]);
};
@@ -99,7 +95,7 @@
* @param {Array.<string>} stack String representation of a stack.
* @return {Array.<number>} Processed stack.
*/
-devtools.profiler.LogReader.prototype.processStack = function(pc, func, stack) {
+LogReader.prototype.processStack = function(pc, func, stack) {
var fullStack = func ? [pc, func] : [pc];
var prevFrame = pc;
for (var i = 0, n = stack.length; i < n; ++i) {
@@ -124,7 +120,7 @@
* @param {!Object} dispatch Dispatch record.
* @return {boolean} True if dispatch must be skipped.
*/
-devtools.profiler.LogReader.prototype.skipDispatch = function(dispatch) {
+LogReader.prototype.skipDispatch = function(dispatch) {
return false;
};
@@ -135,7 +131,7 @@
* @param {Array.<string>} fields Log record.
* @private
*/
-devtools.profiler.LogReader.prototype.dispatchLogRow_ = function(fields) {
+LogReader.prototype.dispatchLogRow_ = function(fields) {
// Obtain the dispatch.
var command = fields[0];
if (!(command in this.dispatchTable_)) {
@@ -173,7 +169,7 @@
* @param {Array.<string>} lines Log lines.
* @private
*/
-devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
+LogReader.prototype.processLog_ = function(lines) {
for (var i = 0, n = lines.length; i < n; ++i, ++this.lineNum_) {
var line = lines[i];
if (!line) {
diff --git a/tools/oprofile/annotate b/tools/oprofile/annotate
deleted file mode 100755
index a6a8545..0000000
--- a/tools/oprofile/annotate
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-opannotate --assembly --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
-
diff --git a/tools/oprofile/common b/tools/oprofile/common
deleted file mode 100755
index fd00207..0000000
--- a/tools/oprofile/common
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-
-# Determine the session directory to use for oprofile.
-[ "$OPROFILE_SESSION_DIR" ] || OPROFILE_SESSION_DIR=/tmp/oprofv8
-
-# If no executable passed as the first parameter assume V8 release mode shell.
-if [[ -x $1 ]]
-then
- shell_exec=`readlink -f "$1"`
- # Any additional parameters are for the oprofile command.
- shift
-else
- oprofile_tools_path=`cd $(dirname "$0");pwd`
- [ "$V8_SHELL_DIR" ] || V8_SHELL_DIR=$oprofile_tools_path/../..
- shell_exec=$V8_SHELL_DIR/shell
-fi
-
-alias sudo_opcontrol='sudo opcontrol --session-dir="$OPROFILE_SESSION_DIR"'
-
diff --git a/tools/oprofile/dump b/tools/oprofile/dump
deleted file mode 100755
index 17bb0a1..0000000
--- a/tools/oprofile/dump
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-sudo_opcontrol --dump "@$"
-
diff --git a/tools/oprofile/report b/tools/oprofile/report
deleted file mode 100755
index b7f28b9..0000000
--- a/tools/oprofile/report
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-opreport --symbols --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
-
diff --git a/tools/oprofile/reset b/tools/oprofile/reset
deleted file mode 100755
index edb7071..0000000
--- a/tools/oprofile/reset
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-sudo_opcontrol --reset "$@"
-
diff --git a/tools/oprofile/run b/tools/oprofile/run
deleted file mode 100755
index 0a92470..0000000
--- a/tools/oprofile/run
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-# Reset oprofile samples.
-sudo_opcontrol --reset
-
-# Run the executable to profile with the correct arguments.
-"$shell_exec" --oprofile "$@"
-
-# Flush oprofile data including the generated code into ELF binaries.
-sudo_opcontrol --dump
-
diff --git a/tools/oprofile/shutdown b/tools/oprofile/shutdown
deleted file mode 100755
index 8ebb72f..0000000
--- a/tools/oprofile/shutdown
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-sudo_opcontrol --shutdown "$@"
-
diff --git a/tools/oprofile/start b/tools/oprofile/start
deleted file mode 100755
index 059e4b8..0000000
--- a/tools/oprofile/start
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# Source common stuff.
-. `cd $(dirname "$0");pwd`/common
-
-sudo_opcontrol --start --no-vmlinux "$@"
-
diff --git a/tools/profile.js b/tools/profile.js
index b2de649..03bee83 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -26,27 +26,22 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Initlialize namespaces
-var devtools = devtools || {};
-devtools.profiler = devtools.profiler || {};
-
-
/**
* Creates a profile object for processing profiling-related events
* and calculating function execution times.
*
* @constructor
*/
-devtools.profiler.Profile = function() {
- this.codeMap_ = new devtools.profiler.CodeMap();
- this.topDownTree_ = new devtools.profiler.CallTree();
- this.bottomUpTree_ = new devtools.profiler.CallTree();
+function Profile() {
+ this.codeMap_ = new CodeMap();
+ this.topDownTree_ = new CallTree();
+ this.bottomUpTree_ = new CallTree();
};
/**
* Version of profiler log.
*/
-devtools.profiler.Profile.VERSION = 2;
+Profile.VERSION = 2;
/**
@@ -55,7 +50,7 @@
*
* @param {string} name Function name.
*/
-devtools.profiler.Profile.prototype.skipThisFunction = function(name) {
+Profile.prototype.skipThisFunction = function(name) {
return false;
};
@@ -66,7 +61,7 @@
*
* @enum {number}
*/
-devtools.profiler.Profile.Operation = {
+Profile.Operation = {
MOVE: 0,
DELETE: 1,
TICK: 2
@@ -76,7 +71,7 @@
/**
* Called whenever the specified operation has failed finding a function
* containing the specified address. Should be overriden by subclasses.
- * See the devtools.profiler.Profile.Operation enum for the list of
+ * See the Profile.Operation enum for the list of
* possible operations.
*
* @param {number} operation Operation.
@@ -85,7 +80,7 @@
* during stack strace processing, specifies a position of the frame
* containing the address.
*/
-devtools.profiler.Profile.prototype.handleUnknownCode = function(
+Profile.prototype.handleUnknownCode = function(
operation, addr, opt_stackPos) {
};
@@ -97,9 +92,9 @@
* @param {number} startAddr Starting address.
* @param {number} endAddr Ending address.
*/
-devtools.profiler.Profile.prototype.addLibrary = function(
+Profile.prototype.addLibrary = function(
name, startAddr, endAddr) {
- var entry = new devtools.profiler.CodeMap.CodeEntry(
+ var entry = new CodeMap.CodeEntry(
endAddr - startAddr, name);
this.codeMap_.addLibrary(startAddr, entry);
return entry;
@@ -113,9 +108,9 @@
* @param {number} startAddr Starting address.
* @param {number} endAddr Ending address.
*/
-devtools.profiler.Profile.prototype.addStaticCode = function(
+Profile.prototype.addStaticCode = function(
name, startAddr, endAddr) {
- var entry = new devtools.profiler.CodeMap.CodeEntry(
+ var entry = new CodeMap.CodeEntry(
endAddr - startAddr, name);
this.codeMap_.addStaticCode(startAddr, entry);
return entry;
@@ -130,9 +125,9 @@
* @param {number} start Starting address.
* @param {number} size Code entry size.
*/
-devtools.profiler.Profile.prototype.addCode = function(
+Profile.prototype.addCode = function(
type, name, start, size) {
- var entry = new devtools.profiler.Profile.DynamicCodeEntry(size, type, name);
+ var entry = new Profile.DynamicCodeEntry(size, type, name);
this.codeMap_.addCode(start, entry);
return entry;
};
@@ -144,7 +139,7 @@
* @param {number} aliasAddr Alias address.
* @param {number} addr Code entry address.
*/
-devtools.profiler.Profile.prototype.addCodeAlias = function(
+Profile.prototype.addCodeAlias = function(
aliasAddr, addr) {
var entry = this.codeMap_.findDynamicEntryByStartAddress(addr);
if (entry) {
@@ -159,11 +154,11 @@
* @param {number} from Current code entry address.
* @param {number} to New code entry address.
*/
-devtools.profiler.Profile.prototype.moveCode = function(from, to) {
+Profile.prototype.moveCode = function(from, to) {
try {
this.codeMap_.moveCode(from, to);
} catch (e) {
- this.handleUnknownCode(devtools.profiler.Profile.Operation.MOVE, from);
+ this.handleUnknownCode(Profile.Operation.MOVE, from);
}
};
@@ -173,11 +168,11 @@
*
* @param {number} start Starting address.
*/
-devtools.profiler.Profile.prototype.deleteCode = function(start) {
+Profile.prototype.deleteCode = function(start) {
try {
this.codeMap_.deleteCode(start);
} catch (e) {
- this.handleUnknownCode(devtools.profiler.Profile.Operation.DELETE, start);
+ this.handleUnknownCode(Profile.Operation.DELETE, start);
}
};
@@ -188,7 +183,7 @@
* @param {number} from Current code entry address.
* @param {number} to New code entry address.
*/
-devtools.profiler.Profile.prototype.safeMoveDynamicCode = function(from, to) {
+Profile.prototype.safeMoveDynamicCode = function(from, to) {
if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
this.codeMap_.moveCode(from, to);
}
@@ -200,7 +195,7 @@
*
* @param {number} start Starting address.
*/
-devtools.profiler.Profile.prototype.safeDeleteDynamicCode = function(start) {
+Profile.prototype.safeDeleteDynamicCode = function(start) {
if (this.codeMap_.findDynamicEntryByStartAddress(start)) {
this.codeMap_.deleteCode(start);
}
@@ -212,7 +207,7 @@
*
* @param {number} addr Entry address.
*/
-devtools.profiler.Profile.prototype.findEntry = function(addr) {
+Profile.prototype.findEntry = function(addr) {
return this.codeMap_.findEntry(addr);
};
@@ -223,7 +218,7 @@
*
* @param {Array<number>} stack Stack sample.
*/
-devtools.profiler.Profile.prototype.recordTick = function(stack) {
+Profile.prototype.recordTick = function(stack) {
var processedStack = this.resolveAndFilterFuncs_(stack);
this.bottomUpTree_.addPath(processedStack);
processedStack.reverse();
@@ -237,7 +232,7 @@
*
* @param {Array<number>} stack Stack sample.
*/
-devtools.profiler.Profile.prototype.resolveAndFilterFuncs_ = function(stack) {
+Profile.prototype.resolveAndFilterFuncs_ = function(stack) {
var result = [];
for (var i = 0; i < stack.length; ++i) {
var entry = this.codeMap_.findEntry(stack[i]);
@@ -248,7 +243,7 @@
}
} else {
this.handleUnknownCode(
- devtools.profiler.Profile.Operation.TICK, stack[i], i);
+ Profile.Operation.TICK, stack[i], i);
}
}
return result;
@@ -258,9 +253,9 @@
/**
* Performs a BF traversal of the top down call graph.
*
- * @param {function(devtools.profiler.CallTree.Node)} f Visitor function.
+ * @param {function(CallTree.Node)} f Visitor function.
*/
-devtools.profiler.Profile.prototype.traverseTopDownTree = function(f) {
+Profile.prototype.traverseTopDownTree = function(f) {
this.topDownTree_.traverse(f);
};
@@ -268,9 +263,9 @@
/**
* Performs a BF traversal of the bottom up call graph.
*
- * @param {function(devtools.profiler.CallTree.Node)} f Visitor function.
+ * @param {function(CallTree.Node)} f Visitor function.
*/
-devtools.profiler.Profile.prototype.traverseBottomUpTree = function(f) {
+Profile.prototype.traverseBottomUpTree = function(f) {
this.bottomUpTree_.traverse(f);
};
@@ -281,7 +276,7 @@
*
* @param {string} opt_label Node label.
*/
-devtools.profiler.Profile.prototype.getTopDownProfile = function(opt_label) {
+Profile.prototype.getTopDownProfile = function(opt_label) {
return this.getTreeProfile_(this.topDownTree_, opt_label);
};
@@ -292,7 +287,7 @@
*
* @param {string} opt_label Node label.
*/
-devtools.profiler.Profile.prototype.getBottomUpProfile = function(opt_label) {
+Profile.prototype.getBottomUpProfile = function(opt_label) {
return this.getTreeProfile_(this.bottomUpTree_, opt_label);
};
@@ -300,10 +295,10 @@
/**
* Helper function for calculating a tree profile.
*
- * @param {devtools.profiler.Profile.CallTree} tree Call tree.
+ * @param {Profile.CallTree} tree Call tree.
* @param {string} opt_label Node label.
*/
-devtools.profiler.Profile.prototype.getTreeProfile_ = function(tree, opt_label) {
+Profile.prototype.getTreeProfile_ = function(tree, opt_label) {
if (!opt_label) {
tree.computeTotalWeights();
return tree;
@@ -321,9 +316,9 @@
*
* @param {string} opt_label Starting node label.
*/
-devtools.profiler.Profile.prototype.getFlatProfile = function(opt_label) {
- var counters = new devtools.profiler.CallTree();
- var rootLabel = opt_label || devtools.profiler.CallTree.ROOT_NODE_LABEL;
+Profile.prototype.getFlatProfile = function(opt_label) {
+ var counters = new CallTree();
+ var rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
var precs = {};
precs[rootLabel] = 0;
var root = counters.findOrAddChild(rootLabel);
@@ -378,8 +373,8 @@
* @param {string} name Function name.
* @constructor
*/
-devtools.profiler.Profile.DynamicCodeEntry = function(size, type, name) {
- devtools.profiler.CodeMap.CodeEntry.call(this, size, name);
+Profile.DynamicCodeEntry = function(size, type, name) {
+ CodeMap.CodeEntry.call(this, size, name);
this.type = type;
};
@@ -387,7 +382,7 @@
/**
* Returns node name.
*/
-devtools.profiler.Profile.DynamicCodeEntry.prototype.getName = function() {
+Profile.DynamicCodeEntry.prototype.getName = function() {
var name = this.name;
if (name.length == 0) {
name = '<anonymous>';
@@ -402,12 +397,12 @@
/**
* Returns raw node name (without type decoration).
*/
-devtools.profiler.Profile.DynamicCodeEntry.prototype.getRawName = function() {
+Profile.DynamicCodeEntry.prototype.getRawName = function() {
return this.name;
};
-devtools.profiler.Profile.DynamicCodeEntry.prototype.isJSFunction = function() {
+Profile.DynamicCodeEntry.prototype.isJSFunction = function() {
return this.type == "Function" ||
this.type == "LazyCompile" ||
this.type == "Script";
@@ -419,28 +414,28 @@
*
* @constructor
*/
-devtools.profiler.CallTree = function() {
- this.root_ = new devtools.profiler.CallTree.Node(
- devtools.profiler.CallTree.ROOT_NODE_LABEL);
+function CallTree() {
+ this.root_ = new CallTree.Node(
+ CallTree.ROOT_NODE_LABEL);
};
/**
* The label of the root node.
*/
-devtools.profiler.CallTree.ROOT_NODE_LABEL = '';
+CallTree.ROOT_NODE_LABEL = '';
/**
* @private
*/
-devtools.profiler.CallTree.prototype.totalsComputed_ = false;
+CallTree.prototype.totalsComputed_ = false;
/**
* Returns the tree root.
*/
-devtools.profiler.CallTree.prototype.getRoot = function() {
+CallTree.prototype.getRoot = function() {
return this.root_;
};
@@ -450,7 +445,7 @@
*
* @param {Array<string>} path Call path.
*/
-devtools.profiler.CallTree.prototype.addPath = function(path) {
+CallTree.prototype.addPath = function(path) {
if (path.length == 0) {
return;
}
@@ -470,7 +465,7 @@
*
* @param {string} label Child node label.
*/
-devtools.profiler.CallTree.prototype.findOrAddChild = function(label) {
+CallTree.prototype.findOrAddChild = function(label) {
return this.root_.findOrAddChild(label);
};
@@ -491,8 +486,8 @@
*
* @param {string} label The label of the new root node.
*/
-devtools.profiler.CallTree.prototype.cloneSubtree = function(label) {
- var subTree = new devtools.profiler.CallTree();
+CallTree.prototype.cloneSubtree = function(label) {
+ var subTree = new CallTree();
this.traverse(function(node, parent) {
if (!parent && node.label != label) {
return null;
@@ -508,7 +503,7 @@
/**
* Computes total weights in the call graph.
*/
-devtools.profiler.CallTree.prototype.computeTotalWeights = function() {
+CallTree.prototype.computeTotalWeights = function() {
if (this.totalsComputed_) {
return;
}
@@ -529,10 +524,10 @@
* return nodeClone;
* });
*
- * @param {function(devtools.profiler.CallTree.Node, *)} f Visitor function.
+ * @param {function(CallTree.Node, *)} f Visitor function.
* The second parameter is the result of calling 'f' on the parent node.
*/
-devtools.profiler.CallTree.prototype.traverse = function(f) {
+CallTree.prototype.traverse = function(f) {
var pairsToProcess = new ConsArray();
pairsToProcess.concat([{node: this.root_, param: null}]);
while (!pairsToProcess.atEnd()) {
@@ -550,12 +545,12 @@
/**
* Performs an indepth call graph traversal.
*
- * @param {function(devtools.profiler.CallTree.Node)} enter A function called
+ * @param {function(CallTree.Node)} enter A function called
* prior to visiting node's children.
- * @param {function(devtools.profiler.CallTree.Node)} exit A function called
+ * @param {function(CallTree.Node)} exit A function called
* after visiting node's children.
*/
-devtools.profiler.CallTree.prototype.traverseInDepth = function(enter, exit) {
+CallTree.prototype.traverseInDepth = function(enter, exit) {
function traverse(node) {
enter(node);
node.forEachChild(traverse);
@@ -569,9 +564,9 @@
* Constructs a call graph node.
*
* @param {string} label Node label.
- * @param {devtools.profiler.CallTree.Node} opt_parent Node parent.
+ * @param {CallTree.Node} opt_parent Node parent.
*/
-devtools.profiler.CallTree.Node = function(label, opt_parent) {
+CallTree.Node = function(label, opt_parent) {
this.label = label;
this.parent = opt_parent;
this.children = {};
@@ -583,14 +578,14 @@
* a call path).
* @type {number}
*/
-devtools.profiler.CallTree.Node.prototype.selfWeight = 0;
+CallTree.Node.prototype.selfWeight = 0;
/**
* Node total weight (includes weights of all children).
* @type {number}
*/
-devtools.profiler.CallTree.Node.prototype.totalWeight = 0;
+CallTree.Node.prototype.totalWeight = 0;
/**
@@ -598,8 +593,8 @@
*
* @param {string} label Child node label.
*/
-devtools.profiler.CallTree.Node.prototype.addChild = function(label) {
- var child = new devtools.profiler.CallTree.Node(label, this);
+CallTree.Node.prototype.addChild = function(label) {
+ var child = new CallTree.Node(label, this);
this.children[label] = child;
return child;
};
@@ -608,7 +603,7 @@
/**
* Computes node's total weight.
*/
-devtools.profiler.CallTree.Node.prototype.computeTotalWeight =
+CallTree.Node.prototype.computeTotalWeight =
function() {
var totalWeight = this.selfWeight;
this.forEachChild(function(child) {
@@ -620,7 +615,7 @@
/**
* Returns all node's children as an array.
*/
-devtools.profiler.CallTree.Node.prototype.exportChildren = function() {
+CallTree.Node.prototype.exportChildren = function() {
var result = [];
this.forEachChild(function (node) { result.push(node); });
return result;
@@ -632,7 +627,7 @@
*
* @param {string} label Child node label.
*/
-devtools.profiler.CallTree.Node.prototype.findChild = function(label) {
+CallTree.Node.prototype.findChild = function(label) {
return this.children[label] || null;
};
@@ -643,7 +638,7 @@
*
* @param {string} label Child node label.
*/
-devtools.profiler.CallTree.Node.prototype.findOrAddChild = function(label) {
+CallTree.Node.prototype.findOrAddChild = function(label) {
return this.findChild(label) || this.addChild(label);
};
@@ -651,9 +646,9 @@
/**
* Calls the specified function for every child.
*
- * @param {function(devtools.profiler.CallTree.Node)} f Visitor function.
+ * @param {function(CallTree.Node)} f Visitor function.
*/
-devtools.profiler.CallTree.Node.prototype.forEachChild = function(f) {
+CallTree.Node.prototype.forEachChild = function(f) {
for (var c in this.children) {
f(this.children[c]);
}
@@ -663,9 +658,9 @@
/**
* Walks up from the current node up to the call tree root.
*
- * @param {function(devtools.profiler.CallTree.Node)} f Visitor function.
+ * @param {function(CallTree.Node)} f Visitor function.
*/
-devtools.profiler.CallTree.Node.prototype.walkUpToRoot = function(f) {
+CallTree.Node.prototype.walkUpToRoot = function(f) {
for (var curr = this; curr != null; curr = curr.parent) {
f(curr);
}
@@ -676,9 +671,9 @@
* Tries to find a node with the specified path.
*
* @param {Array<string>} labels The path.
- * @param {function(devtools.profiler.CallTree.Node)} opt_f Visitor function.
+ * @param {function(CallTree.Node)} opt_f Visitor function.
*/
-devtools.profiler.CallTree.Node.prototype.descendToChild = function(
+CallTree.Node.prototype.descendToChild = function(
labels, opt_f) {
for (var pos = 0, curr = this; pos < labels.length && curr != null; pos++) {
var child = curr.findChild(labels[pos]);
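The profile.js changes above are a mechanical de-namespacing: CallTree and
CallTree.Node keep their behavior, only the devtools.profiler prefix is
dropped. As a quick illustration of the renamed API (a usage sketch, assuming
tools/profile.js is loaded under d8, whose print() builtin tickprocessor.js
also uses):

    // Build a tiny call graph by hand and aggregate its weights.
    var root = new CallTree.Node('(root)');
    var foo = root.findOrAddChild('foo');  // creates missing children on demand
    var bar = foo.findOrAddChild('bar');
    foo.selfWeight = 2;
    bar.selfWeight = 3;
    print(root.computeTotalWeight());                   // 5: self + children
    bar.walkUpToRoot(function(n) { print(n.label); });  // bar, foo, (root)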
diff --git a/tools/profile_view.js b/tools/profile_view.js
index bdea631..e041909 100644
--- a/tools/profile_view.js
+++ b/tools/profile_view.js
@@ -26,18 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Initlialize namespaces
-var devtools = devtools || {};
-devtools.profiler = devtools.profiler || {};
-
-
/**
* Creates a Profile View builder object.
*
* @param {number} samplingRate Number of ms between profiler ticks.
* @constructor
*/
-devtools.profiler.ViewBuilder = function(samplingRate) {
+function ViewBuilder(samplingRate) {
this.samplingRate = samplingRate;
};
@@ -45,11 +40,11 @@
/**
* Builds a profile view for the specified call tree.
*
- * @param {devtools.profiler.CallTree} callTree A call tree.
+ * @param {CallTree} callTree A call tree.
* @param {boolean} opt_bottomUpViewWeights Whether remapping
* of self weights for a bottom up view is needed.
*/
-devtools.profiler.ViewBuilder.prototype.buildView = function(
+ViewBuilder.prototype.buildView = function(
callTree, opt_bottomUpViewWeights) {
var head;
var samplingRate = this.samplingRate;
@@ -80,11 +75,11 @@
/**
* Factory method for a profile view.
*
- * @param {devtools.profiler.ProfileView.Node} head View head node.
- * @return {devtools.profiler.ProfileView} Profile view.
+ * @param {ProfileView.Node} head View head node.
+ * @return {ProfileView} Profile view.
*/
-devtools.profiler.ViewBuilder.prototype.createView = function(head) {
- return new devtools.profiler.ProfileView(head);
+ViewBuilder.prototype.createView = function(head) {
+ return new ProfileView(head);
};
@@ -97,12 +92,12 @@
* profile they can be either callees or callers.)
* @param {number} selfTime Amount of time that the application spent in the
* corresponding function only.
- * @param {devtools.profiler.ProfileView.Node} head Profile view head.
- * @return {devtools.profiler.ProfileView.Node} Profile view node.
+ * @param {ProfileView.Node} head Profile view head.
+ * @return {ProfileView.Node} Profile view node.
*/
-devtools.profiler.ViewBuilder.prototype.createViewNode = function(
+ViewBuilder.prototype.createViewNode = function(
funcName, totalTime, selfTime, head) {
- return new devtools.profiler.ProfileView.Node(
+ return new ProfileView.Node(
funcName, totalTime, selfTime, head);
};
@@ -111,10 +106,10 @@
* Creates a Profile View object. It allows performing sorting
* and filtering actions on the profile.
*
- * @param {devtools.profiler.ProfileView.Node} head Head (root) node.
+ * @param {ProfileView.Node} head Head (root) node.
* @constructor
*/
-devtools.profiler.ProfileView = function(head) {
+function ProfileView(head) {
this.head = head;
};
@@ -122,11 +117,11 @@
/**
* Sorts the profile view using the specified sort function.
*
- * @param {function(devtools.profiler.ProfileView.Node,
- * devtools.profiler.ProfileView.Node):number} sortFunc A sorting
+ * @param {function(ProfileView.Node,
+ * ProfileView.Node):number} sortFunc A sorting
* function. Must comply with Array.sort sorting function requirements.
*/
-devtools.profiler.ProfileView.prototype.sort = function(sortFunc) {
+ProfileView.prototype.sort = function(sortFunc) {
this.traverse(function (node) {
node.sortChildren(sortFunc);
});
@@ -136,9 +131,9 @@
/**
* Traverses profile view nodes in preorder.
*
- * @param {function(devtools.profiler.ProfileView.Node)} f Visitor function.
+ * @param {function(ProfileView.Node)} f Visitor function.
*/
-devtools.profiler.ProfileView.prototype.traverse = function(f) {
+ProfileView.prototype.traverse = function(f) {
var nodesToTraverse = new ConsArray();
nodesToTraverse.concat([this.head]);
while (!nodesToTraverse.atEnd()) {
@@ -159,10 +154,10 @@
* profile they can be either callees or callers.)
* @param {number} selfTime Amount of time that the application spent in the
* corresponding function only.
- * @param {devtools.profiler.ProfileView.Node} head Profile view head.
+ * @param {ProfileView.Node} head Profile view head.
* @constructor
*/
-devtools.profiler.ProfileView.Node = function(
+ProfileView.Node = function(
internalFuncName, totalTime, selfTime, head) {
this.internalFuncName = internalFuncName;
this.totalTime = totalTime;
@@ -176,7 +171,7 @@
/**
* Returns a share of the function's total time in the application's total time.
*/
-devtools.profiler.ProfileView.Node.prototype.__defineGetter__(
+ProfileView.Node.prototype.__defineGetter__(
'totalPercent',
function() { return this.totalTime /
(this.head ? this.head.totalTime : this.totalTime) * 100.0; });
@@ -185,7 +180,7 @@
/**
* Returns a share of the function's self time in the application's total time.
*/
-devtools.profiler.ProfileView.Node.prototype.__defineGetter__(
+ProfileView.Node.prototype.__defineGetter__(
'selfPercent',
function() { return this.selfTime /
(this.head ? this.head.totalTime : this.totalTime) * 100.0; });
@@ -194,7 +189,7 @@
/**
* Returns a share of the function's total time in its parent's total time.
*/
-devtools.profiler.ProfileView.Node.prototype.__defineGetter__(
+ProfileView.Node.prototype.__defineGetter__(
'parentTotalPercent',
function() { return this.totalTime /
(this.parent ? this.parent.totalTime : this.totalTime) * 100.0; });
@@ -203,9 +198,9 @@
/**
* Adds a child to the node.
*
- * @param {devtools.profiler.ProfileView.Node} node Child node.
+ * @param {ProfileView.Node} node Child node.
*/
-devtools.profiler.ProfileView.Node.prototype.addChild = function(node) {
+ProfileView.Node.prototype.addChild = function(node) {
node.parent = this;
this.children.push(node);
};
@@ -214,11 +209,11 @@
/**
* Sorts all the node's children recursively.
*
- * @param {function(devtools.profiler.ProfileView.Node,
- * devtools.profiler.ProfileView.Node):number} sortFunc A sorting
+ * @param {function(ProfileView.Node,
+ * ProfileView.Node):number} sortFunc A sorting
* function. Must comply with Array.sort sorting function requirements.
*/
-devtools.profiler.ProfileView.Node.prototype.sortChildren = function(
+ProfileView.Node.prototype.sortChildren = function(
sortFunc) {
this.children.sort(sortFunc);
};
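Again only the prefix changes in profile_view.js; the ViewBuilder and
ProfileView contracts are untouched. A sketch of the usual flow (assuming
tree is a populated CallTree, e.g. one produced by the tick processing below;
ViewBuilder(1) mirrors the "count each tick as a time unit" convention used
in tickprocessor.js):

    var view = new ViewBuilder(1).buildView(tree);
    // sortFunc follows Array.prototype.sort conventions (see docs above).
    view.sort(function(a, b) { return b.selfTime - a.selfTime; });
    view.traverse(function(node) {
      print(node.internalFuncName + ': ' + node.selfPercent.toFixed(1) + '%');
    });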
diff --git a/tools/splaytree.js b/tools/splaytree.js
index 7b3af8b..1c9aab9 100644
--- a/tools/splaytree.js
+++ b/tools/splaytree.js
@@ -26,12 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// A namespace stub. It will become more clear how to declare it properly
-// during integration of this script into Dev Tools.
-var goog = goog || {};
-goog.structs = goog.structs || {};
-
-
/**
* Constructs a Splay tree. A splay tree is a self-balancing binary
* search tree with the additional property that recently accessed
@@ -40,23 +34,23 @@
*
* @constructor
*/
-goog.structs.SplayTree = function() {
+function SplayTree() {
};
/**
* Pointer to the root node of the tree.
*
- * @type {goog.structs.SplayTree.Node}
+ * @type {SplayTree.Node}
* @private
*/
-goog.structs.SplayTree.prototype.root_ = null;
+SplayTree.prototype.root_ = null;
/**
* @return {boolean} Whether the tree is empty.
*/
-goog.structs.SplayTree.prototype.isEmpty = function() {
+SplayTree.prototype.isEmpty = function() {
return !this.root_;
};
@@ -70,9 +64,9 @@
* @param {number} key Key to insert into the tree.
* @param {*} value Value to insert into the tree.
*/
-goog.structs.SplayTree.prototype.insert = function(key, value) {
+SplayTree.prototype.insert = function(key, value) {
if (this.isEmpty()) {
- this.root_ = new goog.structs.SplayTree.Node(key, value);
+ this.root_ = new SplayTree.Node(key, value);
return;
}
// Splay on the key to move the last node on the search path for
@@ -81,7 +75,7 @@
if (this.root_.key == key) {
return;
}
- var node = new goog.structs.SplayTree.Node(key, value);
+ var node = new SplayTree.Node(key, value);
if (key > this.root_.key) {
node.left = this.root_;
node.right = this.root_.right;
@@ -101,9 +95,9 @@
* key is not found, an exception is thrown.
*
* @param {number} key Key to find and remove from the tree.
- * @return {goog.structs.SplayTree.Node} The removed node.
+ * @return {SplayTree.Node} The removed node.
*/
-goog.structs.SplayTree.prototype.remove = function(key) {
+SplayTree.prototype.remove = function(key) {
if (this.isEmpty()) {
throw Error('Key not found: ' + key);
}
@@ -132,9 +126,9 @@
* a node with the specified key.
*
* @param {number} key Key to find in the tree.
- * @return {goog.structs.SplayTree.Node} Node having the specified key.
+ * @return {SplayTree.Node} Node having the specified key.
*/
-goog.structs.SplayTree.prototype.find = function(key) {
+SplayTree.prototype.find = function(key) {
if (this.isEmpty()) {
return null;
}
@@ -144,9 +138,9 @@
/**
- * @return {goog.structs.SplayTree.Node} Node having the minimum key value.
+ * @return {SplayTree.Node} Node having the minimum key value.
*/
-goog.structs.SplayTree.prototype.findMin = function() {
+SplayTree.prototype.findMin = function() {
if (this.isEmpty()) {
return null;
}
@@ -159,9 +153,9 @@
/**
- * @return {goog.structs.SplayTree.Node} Node having the maximum key value.
+ * @return {SplayTree.Node} Node having the maximum key value.
*/
-goog.structs.SplayTree.prototype.findMax = function(opt_startNode) {
+SplayTree.prototype.findMax = function(opt_startNode) {
if (this.isEmpty()) {
return null;
}
@@ -174,10 +168,10 @@
/**
- * @return {goog.structs.SplayTree.Node} Node having the maximum key value that
+ * @return {SplayTree.Node} Node having the maximum key value that
* is less than or equal to the specified key value.
*/
-goog.structs.SplayTree.prototype.findGreatestLessThan = function(key) {
+SplayTree.prototype.findGreatestLessThan = function(key) {
if (this.isEmpty()) {
return null;
}
@@ -199,7 +193,7 @@
/**
* @return {Array<*>} An array containing all the values of the tree's nodes.
*/
-goog.structs.SplayTree.prototype.exportValues = function() {
+SplayTree.prototype.exportValues = function() {
var result = [];
this.traverse_(function(node) { result.push(node.value); });
return result;
@@ -216,7 +210,7 @@
* @param {number} key Key to splay the tree on.
* @private
*/
-goog.structs.SplayTree.prototype.splay_ = function(key) {
+SplayTree.prototype.splay_ = function(key) {
if (this.isEmpty()) {
return;
}
@@ -226,7 +220,7 @@
// will hold the R tree of the algorithm. Using a dummy node, left
// and right will always be nodes and we avoid special cases.
var dummy, left, right;
- dummy = left = right = new goog.structs.SplayTree.Node(null, null);
+ dummy = left = right = new SplayTree.Node(null, null);
var current = this.root_;
while (true) {
if (key < current.key) {
@@ -281,10 +275,10 @@
/**
* Performs a preorder traversal of the tree.
*
- * @param {function(goog.structs.SplayTree.Node)} f Visitor function.
+ * @param {function(SplayTree.Node)} f Visitor function.
* @private
*/
-goog.structs.SplayTree.prototype.traverse_ = function(f) {
+SplayTree.prototype.traverse_ = function(f) {
var nodesToVisit = [this.root_];
while (nodesToVisit.length > 0) {
var node = nodesToVisit.shift();
@@ -304,19 +298,19 @@
* @param {number} key Key.
* @param {*} value Value.
*/
-goog.structs.SplayTree.Node = function(key, value) {
+SplayTree.Node = function(key, value) {
this.key = key;
this.value = value;
};
/**
- * @type {goog.structs.SplayTree.Node}
+ * @type {SplayTree.Node}
*/
-goog.structs.SplayTree.Node.prototype.left = null;
+SplayTree.Node.prototype.left = null;
/**
- * @type {goog.structs.SplayTree.Node}
+ * @type {SplayTree.Node}
*/
-goog.structs.SplayTree.Node.prototype.right = null;
+SplayTree.Node.prototype.right = null;
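SplayTree likewise only loses its goog.structs prefix. A short sketch of the
API (keys are numbers; in the profiler tools they are typically code
addresses, which is what findGreatestLessThan is for: mapping an arbitrary PC
to the entry that covers it):

    var tree = new SplayTree();
    tree.insert(0x1000, 'foo code');
    tree.insert(0x2000, 'bar code');
    print(tree.find(0x2000).value);                  // 'bar code'
    // Greatest key <= 0x17ff, i.e. the entry containing that address:
    print(tree.findGreatestLessThan(0x17ff).value);  // 'foo code'
    tree.remove(0x1000);                             // throws if key is absent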
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 87864d1..db2f3c9 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -26,16 +26,21 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-function Profile(separateIc) {
- devtools.profiler.Profile.call(this);
+function inherits(childCtor, parentCtor) {
+ childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+
+function V8Profile(separateIc) {
+ Profile.call(this);
if (!separateIc) {
- this.skipThisFunction = function(name) { return Profile.IC_RE.test(name); };
+ this.skipThisFunction = function(name) { return V8Profile.IC_RE.test(name); };
}
};
-Profile.prototype = devtools.profiler.Profile.prototype;
+inherits(V8Profile, Profile);
-Profile.IC_RE =
+V8Profile.IC_RE =
/^(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Call|Load|Store)IC_)/;
@@ -52,13 +57,8 @@
}
-function inherits(childCtor, parentCtor) {
- childCtor.prototype.__proto__ = parentCtor.prototype;
-};
-
-
function SnapshotLogProcessor() {
- devtools.profiler.LogReader.call(this, {
+ LogReader.call(this, {
'code-creation': {
parsers: [null, parseInt, parseInt, null],
processor: this.processCodeCreation },
@@ -72,8 +72,8 @@
'snapshot-pos': { parsers: [parseInt, parseInt],
processor: this.processSnapshotPosition }});
- Profile.prototype.handleUnknownCode = function(operation, addr) {
- var op = devtools.profiler.Profile.Operation;
+ V8Profile.prototype.handleUnknownCode = function(operation, addr) {
+ var op = Profile.Operation;
switch (operation) {
case op.MOVE:
print('Snapshot: Code move event for unknown code: 0x' +
@@ -86,10 +86,10 @@
}
};
- this.profile_ = new Profile();
+ this.profile_ = new V8Profile();
this.serializedEntries_ = [];
}
-inherits(SnapshotLogProcessor, devtools.profiler.LogReader);
+inherits(SnapshotLogProcessor, LogReader);
SnapshotLogProcessor.prototype.processCodeCreation = function(
@@ -127,7 +127,7 @@
function TickProcessor(
cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
- devtools.profiler.LogReader.call(this, {
+ LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
'code-creation': {
@@ -172,9 +172,9 @@
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
- Profile.prototype.handleUnknownCode = function(
+ V8Profile.prototype.handleUnknownCode = function(
operation, addr, opt_stackPos) {
- var op = devtools.profiler.Profile.Operation;
+ var op = Profile.Operation;
switch (operation) {
case op.MOVE:
print('Code move event for unknown code: 0x' + addr.toString(16));
@@ -193,16 +193,16 @@
}
};
- this.profile_ = new Profile(separateIc);
+ this.profile_ = new V8Profile(separateIc);
this.codeTypes_ = {};
// Count each tick as a time unit.
- this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
+ this.viewBuilder_ = new ViewBuilder(1);
this.lastLogFileName_ = null;
this.generation_ = 1;
this.currentProducerProfile_ = null;
};
-inherits(TickProcessor, devtools.profiler.LogReader);
+inherits(TickProcessor, LogReader);
TickProcessor.VmStates = {
@@ -356,7 +356,7 @@
TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
if (space != 'Heap') return;
- this.currentProducerProfile_ = new devtools.profiler.CallTree();
+ this.currentProducerProfile_ = new CallTree();
};
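Beyond the renames, the inherits() switch appears to be the substantive fix
in tickprocessor.js: the old "Profile.prototype =
devtools.profiler.Profile.prototype" made the wrapper share the parent's
prototype object, so installing handleUnknownCode on it (as both
SnapshotLogProcessor and TickProcessor do above) also mutated the base class.
Chaining through __proto__ gives V8Profile its own prototype. A
self-contained sketch of the difference (Parent/Child are illustrative names,
not from the patch):

    function inherits(childCtor, parentCtor) {
      childCtor.prototype.__proto__ = parentCtor.prototype;
    }

    function Parent() {}
    Parent.prototype.describe = function() { return 'parent'; };

    function Child() { Parent.call(this); }
    inherits(Child, Parent);
    Child.prototype.describe = function() { return 'child'; };  // shadows only

    print(new Child().describe());   // 'child'
    print(new Parent().describe());  // still 'parent': base is not clobbered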
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index b994d36..24321e5 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -47,6 +47,7 @@
890A14020EE9C4B400E49346 /* regexp-macro-assembler-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C750EE466D000B48DEB /* regexp-macro-assembler-irregexp.cc */; };
890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C770EE466D000B48DEB /* regexp-macro-assembler-tracer.cc */; };
890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C790EE466D000B48DEB /* regexp-macro-assembler.cc */; };
+ 8924315C12F8539900906AB2 /* lithium-gap-resolver-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */; };
8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8938A2A212D63B630080CDDE /* lithium-x64.cc */; };
893988070F2A35FA007D5254 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
8939880D0F2A362A007D5254 /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
@@ -156,7 +157,6 @@
8956926612D4ED240072C313 /* messages.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF15C0E719B8F00D62E90 /* messages.cc */; };
8956926712D4ED240072C313 /* objects-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1600E719B8F00D62E90 /* objects-debug.cc */; };
8956926812D4ED240072C313 /* objects.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1620E719B8F00D62E90 /* objects.cc */; };
- 8956926912D4ED240072C313 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
8956926A12D4ED240072C313 /* parser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1640E719B8F00D62E90 /* parser.cc */; };
8956926B12D4ED240072C313 /* platform-macos.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1670E719B8F00D62E90 /* platform-macos.cc */; };
8956926C12D4ED240072C313 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
@@ -319,6 +319,13 @@
89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; };
89B933AF0FAA0F9600201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
89B933B00FAA0F9D00201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
+ 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */; };
+ 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
+ 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
+ 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
89F23C400E78D5B2006B2466 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
89F23C410E78D5B2006B2466 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
@@ -419,8 +426,6 @@
9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */; };
9FA38BD01175B30400C4CD55 /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */; };
9FA38BD11175B30400C4CD55 /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */; };
- 9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
- 9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
C2BD4BD7120165460046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
C2BD4BDB120165A70046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
C2BD4BE4120166180046BF9F /* fixed-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */; };
@@ -556,6 +561,8 @@
58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; };
58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
+ 8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-x64.cc"; path = "x64/lithium-gap-resolver-x64.cc"; sourceTree = "<group>"; };
+ 8924315B12F8539900906AB2 /* lithium-gap-resolver-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-x64.h"; path = "x64/lithium-gap-resolver-x64.h"; sourceTree = "<group>"; };
8938A2A212D63B630080CDDE /* lithium-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-x64.cc"; path = "x64/lithium-x64.cc"; sourceTree = "<group>"; };
893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
8939880B0F2A35FA007D5254 /* d8 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = d8; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -886,6 +893,12 @@
89B91B9A12D4EF95002FF4BC /* virtual-frame-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-x64.h"; path = "x64/virtual-frame-x64.h"; sourceTree = "<group>"; };
89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
+ 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-ia32.cc"; path = "ia32/lithium-gap-resolver-ia32.cc"; sourceTree = "<group>"; };
+ 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-ia32.h"; path = "ia32/lithium-gap-resolver-ia32.h"; sourceTree = "<group>"; };
+ 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "gdb-jit.cc"; sourceTree = "<group>"; };
+ 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gdb-jit.h"; sourceTree = "<group>"; };
+ 89D7DDD812E8DE09001E2B82 /* inspector.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = inspector.cc; sourceTree = "<group>"; };
+ 89D7DDD912E8DE09001E2B82 /* inspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inspector.h; sourceTree = "<group>"; };
89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = "<group>"; };
@@ -936,8 +949,6 @@
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-arm.cc"; path = "arm/full-codegen-arm.cc"; sourceTree = "<group>"; };
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-arm.cc"; path = "arm/virtual-frame-arm.cc"; sourceTree = "<group>"; };
- 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
- 9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
9FF7A28311A642EA0051B8F2 /* unbound-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue.h"; sourceTree = "<group>"; };
C2BD4BD5120165460046BF9F /* dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dtoa.cc; sourceTree = "<group>"; };
@@ -1101,7 +1112,6 @@
897FF1270E719B8F00D62E90 /* dateparser.h */,
8956B6CD0F5D86570033B5A2 /* debug-agent.cc */,
8956B6CE0F5D86570033B5A2 /* debug-agent.h */,
- 898BD20C0EF6CC850068B00A /* debug-arm.cc */,
897FF1280E719B8F00D62E90 /* debug.cc */,
897FF1290E719B8F00D62E90 /* debug.h */,
893E248B12B14B3D0083370F /* deoptimizer.cc */,
@@ -1141,6 +1151,8 @@
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */,
893E24DA12B14B9F0083370F /* gc-extension.cc */,
893E24DB12B14B9F0083370F /* gc-extension.h */,
+ 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */,
+ 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */,
897FF13E0E719B8F00D62E90 /* global-handles.cc */,
897FF13F0E719B8F00D62E90 /* global-handles.h */,
897FF1400E719B8F00D62E90 /* globals.h */,
@@ -1158,10 +1170,11 @@
893E248E12B14B3D0083370F /* hydrogen-instructions.h */,
893E248F12B14B3D0083370F /* hydrogen.cc */,
893E249012B14B3D0083370F /* hydrogen.h */,
- 897FF1490E719B8F00D62E90 /* ic-arm.cc */,
897FF14B0E719B8F00D62E90 /* ic-inl.h */,
897FF14C0E719B8F00D62E90 /* ic.cc */,
897FF14D0E719B8F00D62E90 /* ic.h */,
+ 89D7DDD812E8DE09001E2B82 /* inspector.cc */,
+ 89D7DDD912E8DE09001E2B82 /* inspector.h */,
89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */,
89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
@@ -1203,8 +1216,6 @@
C2D1E9721212F27B00187A52 /* objects-visiting.h */,
897FF1620E719B8F00D62E90 /* objects.cc */,
897FF1630E719B8F00D62E90 /* objects.h */,
- 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */,
- 9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */,
897FF1640E719B8F00D62E90 /* parser.cc */,
897FF1650E719B8F00D62E90 /* parser.h */,
89A15C6D0EE466A900B48DEB /* platform-freebsd.cc */,
@@ -1273,7 +1284,6 @@
897FF1890E719B8F00D62E90 /* string-stream.h */,
893E24A312B14B3D0083370F /* strtod.cc */,
893E24A412B14B3D0083370F /* strtod.h */,
- 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */,
897FF18C0E719B8F00D62E90 /* stub-cache.cc */,
897FF18D0E719B8F00D62E90 /* stub-cache.h */,
897FF18E0E719B8F00D62E90 /* token.cc */,
@@ -1442,6 +1452,8 @@
89B91B8C12D4EF95002FF4BC /* jump-target-x64.cc */,
89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */,
89B91B8D12D4EF95002FF4BC /* lithium-codegen-x64.h */,
+ 8924315A12F8539900906AB2 /* lithium-gap-resolver-x64.cc */,
+ 8924315B12F8539900906AB2 /* lithium-gap-resolver-x64.h */,
8938A2A212D63B630080CDDE /* lithium-x64.cc */,
89B91B8E12D4EF95002FF4BC /* lithium-x64.h */,
89B91B8F12D4EF95002FF4BC /* macro-assembler-x64.cc */,
@@ -1463,6 +1475,8 @@
89B91C0312D4F275002FF4BC /* ia32 */ = {
isa = PBXGroup;
children = (
+ 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */,
+ 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */,
897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
@@ -1515,8 +1529,10 @@
896448BC0E9D530500E7C516 /* codegen-arm.h */,
895FA748107FFE73006F39D4 /* constants-arm.cc */,
897FF11B0E719B8F00D62E90 /* constants-arm.h */,
+ 898BD20C0EF6CC850068B00A /* debug-arm.cc */,
893E24C612B14B510083370F /* deoptimizer-arm.cc */,
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */,
+ 897FF1490E719B8F00D62E90 /* ic-arm.cc */,
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */,
893E24C712B14B510083370F /* lithium-arm.cc */,
893E24C812B14B510083370F /* lithium-arm.h */,
@@ -1531,6 +1547,7 @@
895FA751107FFEAE006F39D4 /* register-allocator-arm.h */,
897FF17D0E719B8F00D62E90 /* simulator-arm.cc */,
897FF17E0E719B8F00D62E90 /* simulator-arm.h */,
+ 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */,
893E24CB12B14B520083370F /* virtual-frame-arm-inl.h */,
9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */,
58950D570F55514900F3E8BA /* virtual-frame-arm.h */,
@@ -1882,7 +1899,6 @@
8956926612D4ED240072C313 /* messages.cc in Sources */,
8956926712D4ED240072C313 /* objects-debug.cc in Sources */,
8956926812D4ED240072C313 /* objects.cc in Sources */,
- 8956926912D4ED240072C313 /* oprofile-agent.cc in Sources */,
8956926A12D4ED240072C313 /* parser.cc in Sources */,
8956926B12D4ED240072C313 /* platform-macos.cc in Sources */,
8956926C12D4ED240072C313 /* platform-posix.cc in Sources */,
@@ -1958,6 +1974,9 @@
8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */,
894A59E912D777E80000766D /* lithium.cc in Sources */,
89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */,
+ 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */,
+ 8924315C12F8539900906AB2 /* lithium-gap-resolver-x64.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2029,7 +2048,6 @@
89A88E120E71A67A0043BA31 /* messages.cc in Sources */,
89A88E130E71A6860043BA31 /* objects-debug.cc in Sources */,
89A88E140E71A6870043BA31 /* objects.cc in Sources */,
- 9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */,
89A88E150E71A68C0043BA31 /* parser.cc in Sources */,
89A88E160E71A68E0043BA31 /* platform-macos.cc in Sources */,
893A72240F7B101400303DD2 /* platform-posix.cc in Sources */,
@@ -2093,6 +2111,9 @@
893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827512C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EB12D777E80000766D /* lithium.cc in Sources */,
+ 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */,
+ 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2203,7 +2224,6 @@
89F23C660E78D5B2006B2466 /* messages.cc in Sources */,
89F23C670E78D5B2006B2466 /* objects-debug.cc in Sources */,
89F23C680E78D5B2006B2466 /* objects.cc in Sources */,
- 9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */,
89F23C690E78D5B2006B2466 /* parser.cc in Sources */,
89F23C6A0E78D5B2006B2466 /* platform-macos.cc in Sources */,
893A72250F7B101B00303DD2 /* platform-posix.cc in Sources */,
@@ -2268,6 +2288,8 @@
893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827612C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EA12D777E80000766D /* lithium.cc in Sources */,
+ 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2395,6 +2417,7 @@
V8_ENABLE_CHECKS,
OBJECT_PRINT,
ENABLE_VMSTATE_TRACKING,
+ ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
@@ -2434,6 +2457,7 @@
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
NDEBUG,
+ ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = NO;
diff --git a/tools/visual_studio/common.vsprops b/tools/visual_studio/common.vsprops
index 20bb119..fa78cdc 100644
--- a/tools/visual_studio/common.vsprops
+++ b/tools/visual_studio/common.vsprops
@@ -16,7 +16,7 @@
WarnAsError="true"
Detect64BitPortabilityProblems="false"
DebugInformationFormat="3"
- DisableSpecificWarnings="4355;4800"
+ DisableSpecificWarnings="4351;4355;4800"
EnableFunctionLevelLinking="true"
/>
<Tool
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index ceeebf9..5f76069 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -689,6 +689,14 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\lithium.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\lithium-allocator.cc"
>
</File>
@@ -697,6 +705,10 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ia32\lithium-ia32.cc"
>
</File>
@@ -713,6 +725,14 @@
>
</File>
<File
+ RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\ia32\lithium-gap-resolver-ia32.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\liveedit.cc"
>
</File>
@@ -814,14 +834,6 @@
>
</File>
<File
- RelativePath="..\..\src\oprofile-agent.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\oprofile-agent.h"
- >
- </File>
- <File
RelativePath="..\..\src\parser.cc"
>
</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index cd4c52e..feb7e6c 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -697,6 +697,10 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\lithium-arm.cc"
>
</File>
@@ -812,14 +816,6 @@
>
</File>
<File
- RelativePath="..\..\src\oprofile-agent.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\oprofile-agent.h"
- >
- </File>
- <File
RelativePath="..\..\src\parser.cc"
>
</File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 2c7bf5e..14ed77e 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -493,14 +493,6 @@
>
</File>
<File
- RelativePath="..\..\src\flow-graph.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\flow-graph.h"
- >
- </File>
- <File
RelativePath="..\..\src\frame-element.cc"
>
</File>
@@ -610,6 +602,22 @@
>
</File>
<File
+ RelativePath="..\..\src\hydrogen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen-instructions.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\hydrogen-instructions.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\x64\ic-x64.cc"
>
</File>
@@ -682,6 +690,14 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\lithium.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\lithium-allocator.cc"
>
</File>
@@ -690,6 +706,34 @@
>
</File>
<File
+ RelativePath="..\..\src\lithium-allocator-inl.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-x64.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-codegen-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-codegen-x64.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-gap-resolver-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\lithium-gap-resolver-x64.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\liveedit.cc"
>
</File>
@@ -790,14 +834,6 @@
>
</File>
<File
- RelativePath="..\..\src\oprofile-agent.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\oprofile-agent.h"
- >
- </File>
- <File
RelativePath="..\..\src\parser.cc"
>
</File>
@@ -806,6 +842,22 @@
>
</File>
<File
+ RelativePath="..\..\src\preparser.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparser.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparse-data.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\preparse-data.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\profile-generator.cc"
>
</File>
@@ -930,6 +982,14 @@
>
</File>
<File
+ RelativePath="..\..\src\scanner-base.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\scanner-base.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\scanner.cc"
>
</File>
@@ -1086,6 +1146,14 @@
>
</File>
<File
+ RelativePath="..\..\src\v8checks.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\v8globals.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\v8threads.cc"
>
</File>
@@ -1094,6 +1162,10 @@
>
</File>
<File
+ RelativePath="..\..\src\v8utils.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\variables.cc"
>
</File>
@@ -1157,6 +1229,22 @@
RelativePath="..\..\src\zone.h"
>
</File>
+ <File
+ RelativePath="..\..\src\extensions\externalize-string-extension.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\externalize-string-extension.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\gc-extension.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\extensions\gc-extension.h"
+ >
+ </File>
<Filter
Name="third party"
>
@@ -1197,6 +1285,10 @@
RelativePath="..\..\include\v8.h"
>
</File>
+ <File
+ RelativePath="..\..\include\v8stdint.h"
+ >
+ </File>
</Filter>
</Files>
<Globals>